diff --git a/CHANGELOG.md b/CHANGELOG.md
index a12ef2ad3048..2414b662c539 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,12 +10,11 @@ internal API changes are not present.
 Main (unreleased)
 -----------------

-### Security fixes
+### Breaking changes

-- Fixes following vulnerabilities (@hainenber)
-  - [GO-2023-2409](https://github.com/advisories/GHSA-mhpq-9638-x6pw)
-  - [GO-2023-2412](https://github.com/advisories/GHSA-7ww5-4wqc-m92c)
-  - [CVE-2023-49568](https://github.com/advisories/GHSA-mw99-9chc-xw7r)
+- Prohibit the configuration of services within modules. (@wildum)
+
+- For `otelcol.exporter` components, change the default value of `disable_high_cardinality_metrics` to `true`. (@ptodev)

 ### Features

@@ -23,8 +22,12 @@ Main (unreleased)

 - A new `pyroscope.java` component for profiling Java processes using async-profiler. (@korniltsev)

+- A new `otelcol.processor.resourcedetection` component which inserts resource attributes
+  into OTLP telemetry based on the host on which Grafana Agent is running. (@ptodev)
+
 ### Enhancements

+- Include line numbers in profiles produced by the `pyroscope.java` component. (@korniltsev)
 - Add an option to the windows static mode installer for expanding environment vars in the yaml config. (@erikbaranowski)

 - Add authentication support to `loki.source.awsfirehose` (@sberz)
@@ -33,6 +36,21 @@ Main (unreleased)

 - Expose `physical_disk` collector from `windows_exporter` v0.24.0 to Flow configuration. (@hainenber)

+- Renamed Grafana Agent Mixin's "prometheus.remote_write" dashboard to
+  "Prometheus Components" and added charts for `prometheus.scrape` success rate
+  and duration metrics. (@thampiotr)
+
+- Removed `ClusterLamportClockDrift` and `ClusterLamportClockStuck` alerts from
+  Grafana Agent Mixin to focus on alerting on symptoms. (@thampiotr)
+
+- Increased clustering alert periods to 10 minutes to improve the
+  signal-to-noise ratio in Grafana Agent Mixin. (@thampiotr)
+
+- `mimir.rules.kubernetes` has a new `prometheus_http_prefix` argument to configure
+  the HTTP path prefix used when connecting to Mimir's API. (@hainenber)
+
+- `service_name` label is inferred from discovery meta labels in `pyroscope.java`. (@korniltsev)
+
 ### Bugfixes

 - Fix an issue in `remote.s3` where the exported content of an object would be an empty string if `remote.s3` failed to fully retrieve
@@ -43,6 +61,14 @@ Main (unreleased)
 - Fix a duplicate metrics registration panic when sending metrics to an static mode metric instance's write handler. (@tpaschalis)

+- Fix issue causing duplicate logs when a docker target is restarted. (@captncraig)
+
+- Fix an issue where blocks having the same type and the same label across
+  modules could result in missed updates. (@thampiotr)
+
+- Fix an issue with static integrations-next marshaling where non singletons
+  would cause `/-/config` to fail to marshal. (@erikbaranowski)
+
 ### Other changes

 - Removed support for Windows 2012 in line with Microsoft end of life. (@mattdurham)
@@ -53,6 +79,32 @@ Main (unreleased)

 - Use Go 1.21.6 for builds. (@hainenber)

+v0.39.2 (2024-01-31)
+--------------------
+
+### Bugfixes
+
+- Fix error introduced in v0.39.0 preventing remote write to Amazon Managed Prometheus. (@captncraig)
+
+- An error will be returned in the converter from Static to Flow when `scrape_integration` is set
+  to `true` but no `remote_write` is defined.
(@erikbaranowski) + + +v0.39.1 (2024-01-19) +-------------------- + +### Security fixes + +- Fixes following vulnerabilities (@hainenber) + - [GO-2023-2409](https://github.com/advisories/GHSA-mhpq-9638-x6pw) + - [GO-2023-2412](https://github.com/advisories/GHSA-7ww5-4wqc-m92c) + - [CVE-2023-49568](https://github.com/advisories/GHSA-mw99-9chc-xw7r) + +### Bugfixes + +- Fix issue where installing the Windows Agent Flow installer would hang then crash. (@mattdurham) + + v0.39.0 (2024-01-09) -------------------- diff --git a/cmd/grafana-agent-operator/DEVELOPERS.md b/cmd/grafana-agent-operator/DEVELOPERS.md index 9c2453e1f9f9..58f7be9ae8d5 100644 --- a/cmd/grafana-agent-operator/DEVELOPERS.md +++ b/cmd/grafana-agent-operator/DEVELOPERS.md @@ -74,7 +74,7 @@ running. ### Apply the CRDs Generated CRDs used by the operator can be found in [the Production -folder](../../production/operator/crds). Deploy them from the root of the +folder](../../operations/agent-static-operator/crds). Deploy them from the root of the repository with: ``` diff --git a/cmd/internal/flowmode/cmd_run.go b/cmd/internal/flowmode/cmd_run.go index c8618b928b85..20cb8fb2ab2e 100644 --- a/cmd/internal/flowmode/cmd_run.go +++ b/cmd/internal/flowmode/cmd_run.go @@ -360,7 +360,7 @@ func getEnabledComponentsFunc(f *flow.Flow) func() map[string]interface{} { components := component.GetAllComponents(f, component.InfoOptions{}) componentNames := map[string]struct{}{} for _, c := range components { - componentNames[c.Registration.Name] = struct{}{} + componentNames[c.ComponentName] = struct{}{} } return map[string]interface{}{"enabled-components": maps.Keys(componentNames)} } diff --git a/component/all/all.go b/component/all/all.go index 437a7a07e59b..0bf3da725bbf 100644 --- a/component/all/all.go +++ b/component/all/all.go @@ -82,6 +82,7 @@ import ( _ "github.com/grafana/agent/component/otelcol/processor/k8sattributes" // Import otelcol.processor.k8sattributes _ "github.com/grafana/agent/component/otelcol/processor/memorylimiter" // Import otelcol.processor.memory_limiter _ "github.com/grafana/agent/component/otelcol/processor/probabilistic_sampler" // Import otelcol.processor.probabilistic_sampler + _ "github.com/grafana/agent/component/otelcol/processor/resourcedetection" // Import otelcol.processor.resourcedetection _ "github.com/grafana/agent/component/otelcol/processor/span" // Import otelcol.processor.span _ "github.com/grafana/agent/component/otelcol/processor/tail_sampling" // Import otelcol.processor.tail_sampling _ "github.com/grafana/agent/component/otelcol/processor/transform" // Import otelcol.processor.transform diff --git a/component/component_provider.go b/component/component_provider.go index 90454b5b04c3..630961d8f6db 100644 --- a/component/component_provider.go +++ b/component/component_provider.go @@ -93,8 +93,8 @@ type Info struct { // this component depends on, or is depended on by, respectively. References, ReferencedBy []string - Registration Registration // Component registration. - Health Health // Current component health. + ComponentName string // Name of the component. + Health Health // Current component health. Arguments Arguments // Current arguments value of the component. Exports Exports // Current exports value of the component. 
@@ -157,7 +157,7 @@ func (info *Info) MarshalJSON() ([]byte, error) { } return json.Marshal(&componentDetailJSON{ - Name: info.Registration.Name, + Name: info.ComponentName, Type: "block", ModuleID: info.ID.ModuleID, LocalID: info.ID.LocalID, diff --git a/component/loki/source/docker/internal/dockertarget/target.go b/component/loki/source/docker/internal/dockertarget/target.go index b410d42b9cf2..25acdefa5e57 100644 --- a/component/loki/source/docker/internal/dockertarget/target.go +++ b/component/loki/source/docker/internal/dockertarget/target.go @@ -219,6 +219,7 @@ func (t *Target) process(r io.Reader, logStreamLset model.LabelSet) { // labels (e.g. duplicated and relabeled), but this shouldn't be the // case anyway. t.positions.Put(positions.CursorKey(t.containerName), t.labelsStr, ts.Unix()) + t.since = ts.Unix() } } diff --git a/component/loki/source/docker/internal/dockertarget/target_test.go b/component/loki/source/docker/internal/dockertarget/target_test.go index a2d2053e2c9a..979f15ffb751 100644 --- a/component/loki/source/docker/internal/dockertarget/target_test.go +++ b/component/loki/source/docker/internal/dockertarget/target_test.go @@ -9,7 +9,6 @@ import ( "net/http" "net/http/httptest" "os" - "sort" "strings" "testing" "time" @@ -24,6 +23,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/relabel" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -31,7 +31,13 @@ func TestDockerTarget(t *testing.T) { h := func(w http.ResponseWriter, r *http.Request) { switch path := r.URL.Path; { case strings.HasSuffix(path, "/logs"): - dat, err := os.ReadFile("testdata/flog.log") + var filePath string + if strings.Contains(r.URL.RawQuery, "since=0") { + filePath = "testdata/flog.log" + } else { + filePath = "testdata/flog_after_restart.log" + } + dat, err := os.ReadFile(filePath) require.NoError(t, err) _, err = w.Write(dat) require.NoError(t, err) @@ -76,15 +82,6 @@ func TestDockerTarget(t *testing.T) { require.NoError(t, err) tgt.StartIfNotRunning() - require.Eventually(t, func() bool { - return len(entryHandler.Received()) >= 5 - }, 5*time.Second, 100*time.Millisecond) - - received := entryHandler.Received() - sort.Slice(received, func(i, j int) bool { - return received[i].Timestamp.Before(received[j].Timestamp) - }) - expectedLines := []string{ "5.3.69.55 - - [09/Dec/2021:09:15:02 +0000] \"HEAD /brand/users/clicks-and-mortar/front-end HTTP/2.0\" 503 27087", "101.54.183.185 - - [09/Dec/2021:09:15:03 +0000] \"POST /next-generation HTTP/1.0\" 416 11468", @@ -92,9 +89,49 @@ func TestDockerTarget(t *testing.T) { "28.104.242.74 - - [09/Dec/2021:09:15:03 +0000] \"PATCH /value-added/cultivate/systems HTTP/2.0\" 405 11843", "150.187.51.54 - satterfield1852 [09/Dec/2021:09:15:03 +0000] \"GET /incentivize/deliver/innovative/cross-platform HTTP/1.1\" 301 13032", } - actualLines := make([]string, 0, 5) - for _, entry := range received[:5] { - actualLines = append(actualLines, entry.Line) + + assert.EventuallyWithT(t, func(c *assert.CollectT) { + assertExpectedLog(c, entryHandler, expectedLines) + }, 5*time.Second, 100*time.Millisecond, "Expected log lines were not found within the time limit.") + + tgt.Stop() + entryHandler.Clear() + // restart target to simulate container restart + tgt.StartIfNotRunning() + expectedLinesAfterRestart := []string{ + "243.115.12.215 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /morph/exploit/granular HTTP/1.0\" 500 26468", + "221.41.123.237 - - 
[09/Dec/2023:09:16:57 +0000] \"DELETE /user-centric/whiteboard HTTP/2.0\" 205 22487", + "89.111.144.144 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /open-source/e-commerce HTTP/1.0\" 401 11092", + "62.180.191.187 - - [09/Dec/2023:09:16:57 +0000] \"DELETE /cultivate/integrate/technologies HTTP/2.0\" 302 12979", + "156.249.2.192 - - [09/Dec/2023:09:16:57 +0000] \"POST /revolutionize/mesh/metrics HTTP/2.0\" 401 5297", + } + assert.EventuallyWithT(t, func(c *assert.CollectT) { + assertExpectedLog(c, entryHandler, expectedLinesAfterRestart) + }, 5*time.Second, 100*time.Millisecond, "Expected log lines after restart were not found within the time limit.") +} + +// assertExpectedLog will verify that all expectedLines were received, in any order, without duplicates. +func assertExpectedLog(c *assert.CollectT, entryHandler *fake.Client, expectedLines []string) { + logLines := entryHandler.Received() + testLogLines := make(map[string]int) + for _, l := range logLines { + if containsString(expectedLines, l.Line) { + testLogLines[l.Line] += 1 + } + } + // assert that all log lines were received + assert.Len(c, testLogLines, len(expectedLines)) + // assert that there are no duplicated log lines + for _, v := range testLogLines { + assert.Equal(c, v, 1) + } +} + +func containsString(slice []string, str string) bool { + for _, item := range slice { + if item == str { + return true + } } - require.ElementsMatch(t, actualLines, expectedLines) + return false } diff --git a/component/loki/source/docker/internal/dockertarget/testdata/flog_after_restart.log b/component/loki/source/docker/internal/dockertarget/testdata/flog_after_restart.log new file mode 100644 index 000000000000..59afb576805e Binary files /dev/null and b/component/loki/source/docker/internal/dockertarget/testdata/flog_after_restart.log differ diff --git a/component/mimir/rules/kubernetes/rules.go b/component/mimir/rules/kubernetes/rules.go index 016a888d9104..14765a865095 100644 --- a/component/mimir/rules/kubernetes/rules.go +++ b/component/mimir/rules/kubernetes/rules.go @@ -261,10 +261,11 @@ func (c *Component) init() error { httpClient := c.args.HTTPClientConfig.Convert() c.mimirClient, err = mimirClient.New(c.log, mimirClient.Config{ - ID: c.args.TenantID, - Address: c.args.Address, - UseLegacyRoutes: c.args.UseLegacyRoutes, - HTTPClientConfig: *httpClient, + ID: c.args.TenantID, + Address: c.args.Address, + UseLegacyRoutes: c.args.UseLegacyRoutes, + PrometheusHTTPPrefix: c.args.PrometheusHTTPPrefix, + HTTPClientConfig: *httpClient, }, c.metrics.mimirClientTiming) if err != nil { return err diff --git a/component/mimir/rules/kubernetes/types.go b/component/mimir/rules/kubernetes/types.go index d8e2445e5bf2..390a4f6a4124 100644 --- a/component/mimir/rules/kubernetes/types.go +++ b/component/mimir/rules/kubernetes/types.go @@ -11,6 +11,7 @@ type Arguments struct { Address string `river:"address,attr"` TenantID string `river:"tenant_id,attr,optional"` UseLegacyRoutes bool `river:"use_legacy_routes,attr,optional"` + PrometheusHTTPPrefix string `river:"prometheus_http_prefix,attr,optional"` HTTPClientConfig config.HTTPClientConfig `river:",squash"` SyncInterval time.Duration `river:"sync_interval,attr,optional"` MimirNameSpacePrefix string `river:"mimir_namespace_prefix,attr,optional"` @@ -23,6 +24,7 @@ var DefaultArguments = Arguments{ SyncInterval: 30 * time.Second, MimirNameSpacePrefix: "agent", HTTPClientConfig: config.DefaultHTTPClientConfig, + PrometheusHTTPPrefix: "/prometheus", } // SetToDefault implements river.Defaulter. 
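As a side note on the `prometheus_http_prefix` addition above: a minimal, self-contained sketch of how the new attribute round-trips through River unmarshaling, based on the `Arguments` struct and `DefaultArguments` shown in `types.go`. The import alias and the standalone `main` wrapper are illustrative assumptions, not part of this change:

```go
package main

import (
	"fmt"

	// Import path taken from the file paths in this diff; the alias is illustrative.
	mimirrules "github.com/grafana/agent/component/mimir/rules/kubernetes"
	"github.com/grafana/river"
)

func main() {
	// With the attribute omitted, SetToDefault leaves the new default
	// prefix of "/prometheus" in place.
	var defaults mimirrules.Arguments
	if err := river.Unmarshal([]byte(`address = "http://mimir:8080"`), &defaults); err != nil {
		panic(err)
	}
	fmt.Println(defaults.PrometheusHTTPPrefix) // /prometheus

	// An explicit value overrides the default and is passed through to the
	// Mimir client in Component.init.
	cfg := `
		address                = "http://mimir:8080"
		prometheus_http_prefix = "/api/prom"
	`
	var args mimirrules.Arguments
	if err := river.Unmarshal([]byte(cfg), &args); err != nil {
		panic(err)
	}
	fmt.Println(args.PrometheusHTTPPrefix) // /api/prom
}
```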
diff --git a/component/module/git/git.go b/component/module/git/git.go
index dfe17ef2cb4a..607fcd4577a6 100644
--- a/component/module/git/git.go
+++ b/component/module/git/git.go
@@ -12,7 +12,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/grafana/agent/component"
 	"github.com/grafana/agent/component/module"
-	"github.com/grafana/agent/component/module/git/internal/vcs"
+	"github.com/grafana/agent/internal/vcs"
 	"github.com/grafana/agent/pkg/flow/logging/level"
 )

diff --git a/component/otelcol/config_debug_metrics.go b/component/otelcol/config_debug_metrics.go
index ca8575bee6de..f387f64cbfdf 100644
--- a/component/otelcol/config_debug_metrics.go
+++ b/component/otelcol/config_debug_metrics.go
@@ -7,7 +7,7 @@ type DebugMetricsArguments struct {

 // DefaultDebugMetricsArguments holds default settings for DebugMetricsArguments.
 var DefaultDebugMetricsArguments = DebugMetricsArguments{
-	DisableHighCardinalityMetrics: false,
+	DisableHighCardinalityMetrics: true,
 }

 // SetToDefault implements river.Defaulter.
diff --git a/component/otelcol/config_k8s.go b/component/otelcol/config_k8s.go
new file mode 100644
index 000000000000..b20407fd41fb
--- /dev/null
+++ b/component/otelcol/config_k8s.go
@@ -0,0 +1,35 @@
+package otelcol
+
+import "fmt"
+
+const (
+	KubernetesAPIConfig_AuthType_None           = "none"
+	KubernetesAPIConfig_AuthType_ServiceAccount = "serviceAccount"
+	KubernetesAPIConfig_AuthType_KubeConfig     = "kubeConfig"
+	KubernetesAPIConfig_AuthType_TLS            = "tls"
+)
+
+// KubernetesAPIConfig contains options relevant to connecting to the K8s API.
+type KubernetesAPIConfig struct {
+	// How to authenticate to the K8s API server. This can be one of `none`
+	// (for no auth), `serviceAccount` (to use the standard service account
+	// token provided to the agent pod), `kubeConfig` (to use credentials
+	// from `~/.kube/config`), or `tls` (to use client TLS certificates).
+	AuthType string `river:"auth_type,attr,optional"`
+
+	// When using auth_type `kubeConfig`, override the current context.
+	Context string `river:"context,attr,optional"`
+}
+
+// Validate returns an error if the config is invalid.
+func (c *KubernetesAPIConfig) Validate() error { + switch c.AuthType { + case KubernetesAPIConfig_AuthType_None, + KubernetesAPIConfig_AuthType_ServiceAccount, + KubernetesAPIConfig_AuthType_KubeConfig, + KubernetesAPIConfig_AuthType_TLS: + return nil + default: + return fmt.Errorf("invalid auth_type %q", c.AuthType) + } +} diff --git a/component/otelcol/exporter/loadbalancing/loadbalancing.go b/component/otelcol/exporter/loadbalancing/loadbalancing.go index 3455318fef38..d4b8a87cf5f6 100644 --- a/component/otelcol/exporter/loadbalancing/loadbalancing.go +++ b/component/otelcol/exporter/loadbalancing/loadbalancing.go @@ -59,7 +59,8 @@ var ( Protocol: Protocol{ OTLP: DefaultOTLPConfig, }, - RoutingKey: "traceID", + RoutingKey: "traceID", + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } DefaultOTLPConfig = OtlpConfig{ diff --git a/component/otelcol/exporter/loadbalancing/loadbalancing_test.go b/component/otelcol/exporter/loadbalancing/loadbalancing_test.go index 5e528dd373a3..abc37bc1703d 100644 --- a/component/otelcol/exporter/loadbalancing/loadbalancing_test.go +++ b/component/otelcol/exporter/loadbalancing/loadbalancing_test.go @@ -4,6 +4,7 @@ import ( "testing" "time" + "github.com/grafana/agent/component/otelcol" "github.com/grafana/agent/component/otelcol/exporter/loadbalancing" "github.com/grafana/river" "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter" @@ -268,3 +269,83 @@ func TestConfigConversion(t *testing.T) { }) } } + +func TestDebugMetricsConfig(t *testing.T) { + tests := []struct { + testName string + agentCfg string + expected otelcol.DebugMetricsArguments + }{ + { + testName: "default", + agentCfg: ` + resolver { + static { + hostnames = ["endpoint-1"] + } + } + protocol { + otlp { + client {} + } + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + { + testName: "explicit_false", + agentCfg: ` + resolver { + static { + hostnames = ["endpoint-1"] + } + } + protocol { + otlp { + client {} + } + } + debug_metrics { + disable_high_cardinality_metrics = false + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: false, + }, + }, + { + testName: "explicit_true", + agentCfg: ` + resolver { + static { + hostnames = ["endpoint-1"] + } + } + protocol { + otlp { + client {} + } + } + debug_metrics { + disable_high_cardinality_metrics = true + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args loadbalancing.Arguments + require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + _, err := args.Convert() + require.NoError(t, err) + + require.Equal(t, tc.expected, args.DebugMetricsConfig()) + }) + } +} diff --git a/component/otelcol/exporter/logging/logging.go b/component/otelcol/exporter/logging/logging.go index 13d12fbf312e..3156309ab7cf 100644 --- a/component/otelcol/exporter/logging/logging.go +++ b/component/otelcol/exporter/logging/logging.go @@ -41,6 +41,7 @@ var DefaultArguments = Arguments{ Verbosity: configtelemetry.LevelNormal, SamplingInitial: 2, SamplingThereafter: 500, + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } // SetToDefault implements river.Defaulter. 
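Circling back to the `KubernetesAPIConfig.Validate` method added in `component/otelcol/config_k8s.go` above, a small sketch of its behavior; the constants and method come straight from the diff, while the `main` wrapper is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/grafana/agent/component/otelcol"
)

func main() {
	// Each of the four declared auth_type constants passes validation.
	for _, authType := range []string{
		otelcol.KubernetesAPIConfig_AuthType_None,
		otelcol.KubernetesAPIConfig_AuthType_ServiceAccount,
		otelcol.KubernetesAPIConfig_AuthType_KubeConfig,
		otelcol.KubernetesAPIConfig_AuthType_TLS,
	} {
		cfg := otelcol.KubernetesAPIConfig{AuthType: authType}
		fmt.Printf("%s: %v\n", authType, cfg.Validate()) // all print <nil>
	}

	// Any other value is rejected with a descriptive error.
	bad := otelcol.KubernetesAPIConfig{AuthType: "token"}
	fmt.Println(bad.Validate()) // invalid auth_type "token"
}
```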
diff --git a/component/otelcol/exporter/otlp/otlp.go b/component/otelcol/exporter/otlp/otlp.go index 7ca10d2c2c0b..f473c4722571 100644 --- a/component/otelcol/exporter/otlp/otlp.go +++ b/component/otelcol/exporter/otlp/otlp.go @@ -43,10 +43,11 @@ var _ exporter.Arguments = Arguments{} // DefaultArguments holds default values for Arguments. var DefaultArguments = Arguments{ - Timeout: otelcol.DefaultTimeout, - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultGRPCClientArguments, + Timeout: otelcol.DefaultTimeout, + Queue: otelcol.DefaultQueueArguments, + Retry: otelcol.DefaultRetryArguments, + Client: DefaultGRPCClientArguments, + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } // SetToDefault implements river.Defaulter. diff --git a/component/otelcol/exporter/otlp/otlp_test.go b/component/otelcol/exporter/otlp/otlp_test.go index 9c256ab94ba2..13bd8e56883d 100644 --- a/component/otelcol/exporter/otlp/otlp_test.go +++ b/component/otelcol/exporter/otlp/otlp_test.go @@ -143,3 +143,62 @@ func createTestTraces() ptrace.Traces { } return data } + +func TestDebugMetricsConfig(t *testing.T) { + tests := []struct { + testName string + agentCfg string + expected otelcol.DebugMetricsArguments + }{ + { + testName: "default", + agentCfg: ` + client { + endpoint = "tempo-xxx.grafana.net/tempo:443" + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + { + testName: "explicit_false", + agentCfg: ` + client { + endpoint = "tempo-xxx.grafana.net/tempo:443" + } + debug_metrics { + disable_high_cardinality_metrics = false + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: false, + }, + }, + { + testName: "explicit_true", + agentCfg: ` + client { + endpoint = "tempo-xxx.grafana.net/tempo:443" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args otlp.Arguments + require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + _, err := args.Convert() + require.NoError(t, err) + + require.Equal(t, tc.expected, args.DebugMetricsConfig()) + }) + } +} diff --git a/component/otelcol/exporter/otlphttp/otlphttp.go b/component/otelcol/exporter/otlphttp/otlphttp.go index 0508ec2e6289..b8d3aeaf6956 100644 --- a/component/otelcol/exporter/otlphttp/otlphttp.go +++ b/component/otelcol/exporter/otlphttp/otlphttp.go @@ -48,9 +48,10 @@ var _ exporter.Arguments = Arguments{} // DefaultArguments holds default values for Arguments. var DefaultArguments = Arguments{ - Queue: otelcol.DefaultQueueArguments, - Retry: otelcol.DefaultRetryArguments, - Client: DefaultHTTPClientArguments, + Queue: otelcol.DefaultQueueArguments, + Retry: otelcol.DefaultRetryArguments, + Client: DefaultHTTPClientArguments, + DebugMetrics: otelcol.DefaultDebugMetricsArguments, } // SetToDefault implements river.Defaulter. 
diff --git a/component/otelcol/exporter/otlphttp/otlphttp_test.go b/component/otelcol/exporter/otlphttp/otlphttp_test.go index 64e6328b2fb5..6a2449db6204 100644 --- a/component/otelcol/exporter/otlphttp/otlphttp_test.go +++ b/component/otelcol/exporter/otlphttp/otlphttp_test.go @@ -114,3 +114,62 @@ func createTestTraces() ptrace.Traces { } return data } + +func TestDebugMetricsConfig(t *testing.T) { + tests := []struct { + testName string + agentCfg string + expected otelcol.DebugMetricsArguments + }{ + { + testName: "default", + agentCfg: ` + client { + endpoint = "http://tempo:4317" + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + { + testName: "explicit_false", + agentCfg: ` + client { + endpoint = "http://tempo:4317" + } + debug_metrics { + disable_high_cardinality_metrics = false + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: false, + }, + }, + { + testName: "explicit_true", + agentCfg: ` + client { + endpoint = "http://tempo:4317" + } + debug_metrics { + disable_high_cardinality_metrics = true + } + `, + expected: otelcol.DebugMetricsArguments{ + DisableHighCardinalityMetrics: true, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.testName, func(t *testing.T) { + var args otlphttp.Arguments + require.NoError(t, river.Unmarshal([]byte(tc.agentCfg), &args)) + _, err := args.Convert() + require.NoError(t, err) + + require.Equal(t, tc.expected, args.DebugMetricsConfig()) + }) + } +} diff --git a/component/otelcol/processor/processortest/compare_signals.go b/component/otelcol/processor/processortest/compare_signals.go new file mode 100644 index 000000000000..3fdc52cad1e1 --- /dev/null +++ b/component/otelcol/processor/processortest/compare_signals.go @@ -0,0 +1,46 @@ +package processortest + +import ( + "testing" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/plogtest" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/ptracetest" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +func CompareMetrics(t *testing.T, expected, actual pmetric.Metrics) { + err := pmetrictest.CompareMetrics( + expected, + actual, + pmetrictest.IgnoreResourceMetricsOrder(), + pmetrictest.IgnoreMetricDataPointsOrder(), + pmetrictest.IgnoreMetricsOrder(), + pmetrictest.IgnoreScopeMetricsOrder(), + pmetrictest.IgnoreSummaryDataPointValueAtQuantileSliceOrder(), + pmetrictest.IgnoreTimestamp(), + pmetrictest.IgnoreStartTimestamp(), + ) + require.NoError(t, err) +} + +func CompareLogs(t *testing.T, expected, actual plog.Logs) { + err := plogtest.CompareLogs( + expected, + actual, + ) + require.NoError(t, err) +} + +func CompareTraces(t *testing.T, expected, actual ptrace.Traces) { + err := ptracetest.CompareTraces( + expected, + actual, + ptracetest.IgnoreResourceSpansOrder(), + ptracetest.IgnoreScopeSpansOrder(), + ) + require.NoError(t, err) +} diff --git a/component/otelcol/processor/processortest/compare_signals_test.go b/component/otelcol/processor/processortest/compare_signals_test.go new file mode 100644 index 000000000000..609b1754354c --- /dev/null +++ b/component/otelcol/processor/processortest/compare_signals_test.go @@ -0,0 +1,36 @@ +package processortest + +import ( + "testing" + + "go.opentelemetry.io/collector/pdata/pmetric" + 
"go.opentelemetry.io/collector/pdata/ptrace" +) + +func Test_ScopeMetricsOrder(t *testing.T) { + metric1 := pmetric.NewMetrics() + metric1_res := metric1.ResourceMetrics().AppendEmpty() + metric1_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope1") + metric1_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope2") + + metric2 := pmetric.NewMetrics() + metric2_res := metric2.ResourceMetrics().AppendEmpty() + metric2_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope2") + metric2_res.ScopeMetrics().AppendEmpty().Scope().SetName("scope1") + + CompareMetrics(t, metric1, metric2) +} + +func Test_ScopeSpansAttributesOrder(t *testing.T) { + trace1 := ptrace.NewTraces() + trace1_span_attr := trace1.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Scope().Attributes() + trace1_span_attr.PutStr("key1", "val1") + trace1_span_attr.PutStr("key2", "val2") + + trace2 := ptrace.NewTraces() + trace2_span_attr := trace2.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Scope().Attributes() + trace2_span_attr.PutStr("key2", "val2") + trace2_span_attr.PutStr("key1", "val1") + + CompareTraces(t, trace1, trace2) +} diff --git a/component/otelcol/processor/processortest/processortest.go b/component/otelcol/processor/processortest/processortest.go index 0298f8e9250b..e9a99ec65024 100644 --- a/component/otelcol/processor/processortest/processortest.go +++ b/component/otelcol/processor/processortest/processortest.go @@ -75,16 +75,16 @@ func TestRunProcessor(c ProcessorRunConfig) { // type traceToLogSignal struct { - logCh chan plog.Logs - inputTrace ptrace.Traces - expectedOuutputLog plog.Logs + logCh chan plog.Logs + inputTrace ptrace.Traces + expectedOutputLog plog.Logs } func NewTraceToLogSignal(inputJson string, expectedOutputJson string) Signal { return &traceToLogSignal{ - logCh: make(chan plog.Logs), - inputTrace: CreateTestTraces(inputJson), - expectedOuutputLog: CreateTestLogs(expectedOutputJson), + logCh: make(chan plog.Logs), + inputTrace: CreateTestTraces(inputJson), + expectedOutputLog: CreateTestLogs(expectedOutputJson), } } @@ -101,10 +101,8 @@ func (s traceToLogSignal) CheckOutput(t *testing.T) { select { case <-time.After(time.Second): require.FailNow(t, "failed waiting for logs") - case tr := <-s.logCh: - trStr := marshalLogs(tr) - expStr := marshalLogs(s.expectedOuutputLog) - require.JSONEq(t, expStr, trStr) + case actualLog := <-s.logCh: + CompareLogs(t, s.expectedOutputLog, actualLog) } } @@ -113,17 +111,17 @@ func (s traceToLogSignal) CheckOutput(t *testing.T) { // type traceToMetricSignal struct { - metricCh chan pmetric.Metrics - inputTrace ptrace.Traces - expectedOuutputMetric pmetric.Metrics + metricCh chan pmetric.Metrics + inputTrace ptrace.Traces + expectedOutputMetric pmetric.Metrics } // Any timestamps inside expectedOutputJson should be set to 0. func NewTraceToMetricSignal(inputJson string, expectedOutputJson string) Signal { return &traceToMetricSignal{ - metricCh: make(chan pmetric.Metrics), - inputTrace: CreateTestTraces(inputJson), - expectedOuutputMetric: CreateTestMetrics(expectedOutputJson), + metricCh: make(chan pmetric.Metrics), + inputTrace: CreateTestTraces(inputJson), + expectedOutputMetric: CreateTestMetrics(expectedOutputJson), } } @@ -135,57 +133,6 @@ func (s traceToMetricSignal) ConsumeInput(ctx context.Context, consumer otelcol. return consumer.ConsumeTraces(ctx, s.inputTrace) } -// Set the timestamp of all data points to 0. -// This helps avoid flaky tests due to timestamps. 
-func setMetricTimestampToZero(metrics pmetric.Metrics) { - // Loop over all resource metrics - for i := 0; i < metrics.ResourceMetrics().Len(); i++ { - rm := metrics.ResourceMetrics().At(i) - // Loop over all metric scopes. - for j := 0; j < rm.ScopeMetrics().Len(); j++ { - sm := rm.ScopeMetrics().At(j) - // Loop over all metrics. - for k := 0; k < sm.Metrics().Len(); k++ { - m := sm.Metrics().At(k) - switch m.Type() { - case pmetric.MetricTypeSum: - // Loop over all data points. - for l := 0; l < m.Sum().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Sum().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - case pmetric.MetricTypeGauge: - // Loop over all data points. - for l := 0; l < m.Gauge().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Gauge().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - case pmetric.MetricTypeHistogram: - // Loop over all data points. - for l := 0; l < m.Histogram().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Histogram().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - case pmetric.MetricTypeSummary: - // Loop over all data points. - for l := 0; l < m.Summary().DataPoints().Len(); l++ { - // Set the timestamp to 0 to avoid flaky tests. - dp := m.Summary().DataPoints().At(l) - dp.SetTimestamp(0) - dp.SetStartTimestamp(0) - } - } - } - } - } -} - // Wait for the component to finish and check its output. func (s traceToMetricSignal) CheckOutput(t *testing.T) { // Set the timeout to a few seconds so that all components have finished. @@ -196,14 +143,8 @@ func (s traceToMetricSignal) CheckOutput(t *testing.T) { select { case <-time.After(timeout): require.FailNow(t, "failed waiting for metrics") - case tr := <-s.metricCh: - setMetricTimestampToZero(tr) - trStr := marshalMetrics(tr) - - expStr := marshalMetrics(s.expectedOuutputMetric) - // Set a field from the json to an empty string to avoid flaky tests containing timestamps. 
- - require.JSONEq(t, expStr, trStr) + case actualMetric := <-s.metricCh: + CompareMetrics(t, s.expectedOutputMetric, actualMetric) } } @@ -212,16 +153,16 @@ func (s traceToMetricSignal) CheckOutput(t *testing.T) { // type traceSignal struct { - traceCh chan ptrace.Traces - inputTrace ptrace.Traces - expectedOuutputTrace ptrace.Traces + traceCh chan ptrace.Traces + inputTrace ptrace.Traces + expectedOutputTrace ptrace.Traces } func NewTraceSignal(inputJson string, expectedOutputJson string) Signal { return &traceSignal{ - traceCh: make(chan ptrace.Traces), - inputTrace: CreateTestTraces(inputJson), - expectedOuutputTrace: CreateTestTraces(expectedOutputJson), + traceCh: make(chan ptrace.Traces), + inputTrace: CreateTestTraces(inputJson), + expectedOutputTrace: CreateTestTraces(expectedOutputJson), } } @@ -238,10 +179,8 @@ func (s traceSignal) CheckOutput(t *testing.T) { select { case <-time.After(time.Second): require.FailNow(t, "failed waiting for traces") - case tr := <-s.traceCh: - trStr := marshalTraces(tr) - expStr := marshalTraces(s.expectedOuutputTrace) - require.JSONEq(t, expStr, trStr) + case actualTrace := <-s.traceCh: + CompareTraces(t, s.expectedOutputTrace, actualTrace) } } @@ -256,15 +195,6 @@ func CreateTestTraces(traceJson string) ptrace.Traces { return data } -func marshalTraces(trace ptrace.Traces) string { - marshaler := &ptrace.JSONMarshaler{} - data, err := marshaler.MarshalTraces(trace) - if err != nil { - panic(err) - } - return string(data) -} - // makeTracesOutput returns ConsumerArguments which will forward traces to the // provided channel. func makeTracesOutput(ch chan ptrace.Traces) *otelcol.ConsumerArguments { @@ -289,16 +219,16 @@ func makeTracesOutput(ch chan ptrace.Traces) *otelcol.ConsumerArguments { // type logSignal struct { - logCh chan plog.Logs - inputLog plog.Logs - expectedOuutputLog plog.Logs + logCh chan plog.Logs + inputLog plog.Logs + expectedOutputLog plog.Logs } func NewLogSignal(inputJson string, expectedOutputJson string) Signal { return &logSignal{ - logCh: make(chan plog.Logs), - inputLog: CreateTestLogs(inputJson), - expectedOuutputLog: CreateTestLogs(expectedOutputJson), + logCh: make(chan plog.Logs), + inputLog: CreateTestLogs(inputJson), + expectedOutputLog: CreateTestLogs(expectedOutputJson), } } @@ -315,10 +245,8 @@ func (s logSignal) CheckOutput(t *testing.T) { select { case <-time.After(time.Second): require.FailNow(t, "failed waiting for logs") - case tr := <-s.logCh: - trStr := marshalLogs(tr) - expStr := marshalLogs(s.expectedOuutputLog) - require.JSONEq(t, expStr, trStr) + case actualLog := <-s.logCh: + CompareLogs(t, s.expectedOutputLog, actualLog) } } @@ -352,30 +280,21 @@ func CreateTestLogs(logJson string) plog.Logs { return data } -func marshalLogs(log plog.Logs) string { - marshaler := &plog.JSONMarshaler{} - data, err := marshaler.MarshalLogs(log) - if err != nil { - panic(err) - } - return string(data) -} - // // Metrics // type metricSignal struct { - metricCh chan pmetric.Metrics - inputMetric pmetric.Metrics - expectedOuutputMetric pmetric.Metrics + metricCh chan pmetric.Metrics + inputMetric pmetric.Metrics + expectedOutputMetric pmetric.Metrics } func NewMetricSignal(inputJson string, expectedOutputJson string) Signal { return &metricSignal{ - metricCh: make(chan pmetric.Metrics), - inputMetric: CreateTestMetrics(inputJson), - expectedOuutputMetric: CreateTestMetrics(expectedOutputJson), + metricCh: make(chan pmetric.Metrics), + inputMetric: CreateTestMetrics(inputJson), + expectedOutputMetric: 
CreateTestMetrics(expectedOutputJson),
 	}
 }

@@ -392,10 +311,8 @@ func (s metricSignal) CheckOutput(t *testing.T) {
 	select {
 	case <-time.After(time.Second):
 		require.FailNow(t, "failed waiting for logs")
-	case tr := <-s.metricCh:
-		trStr := marshalMetrics(tr)
-		expStr := marshalMetrics(s.expectedOuutputMetric)
-		require.JSONEq(t, expStr, trStr)
+	case actualMetric := <-s.metricCh:
+		CompareMetrics(t, s.expectedOutputMetric, actualMetric)
 	}
 }

@@ -428,12 +345,3 @@ func CreateTestMetrics(metricJson string) pmetric.Metrics {
 	}
 	return data
 }
-
-func marshalMetrics(metrics pmetric.Metrics) string {
-	marshaler := &pmetric.JSONMarshaler{}
-	data, err := marshaler.MarshalMetrics(metrics)
-	if err != nil {
-		panic(err)
-	}
-	return string(data)
-}
diff --git a/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go b/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go
new file mode 100644
index 000000000000..9b715eac4a12
--- /dev/null
+++ b/component/otelcol/processor/resourcedetection/internal/aws/ec2/config.go
@@ -0,0 +1,72 @@
+package ec2
+
+import (
+	rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config"
+	"github.com/grafana/river"
+)
+
+const Name = "ec2"
+
+// Config defines user-specified configurations unique to the EC2 detector.
+type Config struct {
+	// Tags is a list of regexes to match EC2 instance tag keys that users want
+	// to add as resource attributes to processed data.
+	Tags               []string                 `river:"tags,attr,optional"`
+	ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"`
+}
+
+// DefaultArguments holds default settings for Config.
+var DefaultArguments = Config{
+	ResourceAttributes: ResourceAttributesConfig{
+		CloudAccountID:        rac.ResourceAttributeConfig{Enabled: true},
+		CloudAvailabilityZone: rac.ResourceAttributeConfig{Enabled: true},
+		CloudPlatform:         rac.ResourceAttributeConfig{Enabled: true},
+		CloudProvider:         rac.ResourceAttributeConfig{Enabled: true},
+		CloudRegion:           rac.ResourceAttributeConfig{Enabled: true},
+		HostID:                rac.ResourceAttributeConfig{Enabled: true},
+		HostImageID:           rac.ResourceAttributeConfig{Enabled: true},
+		HostName:              rac.ResourceAttributeConfig{Enabled: true},
+		HostType:              rac.ResourceAttributeConfig{Enabled: true},
+	},
+}
+
+var _ river.Defaulter = (*Config)(nil)
+
+// SetToDefault implements river.Defaulter.
+func (args *Config) SetToDefault() {
+	*args = DefaultArguments
+}
+
+func (args Config) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		"tags":                append([]string{}, args.Tags...),
+		"resource_attributes": args.ResourceAttributes.Convert(),
+	}
+}
+
+// ResourceAttributesConfig provides config to enable and disable resource attributes.
+type ResourceAttributesConfig struct { + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostImageID rac.ResourceAttributeConfig `river:"host.image.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + HostType rac.ResourceAttributeConfig `river:"host.type,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.availability_zone": r.CloudAvailabilityZone.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "host.id": r.HostID.Convert(), + "host.image.id": r.HostImageID.Convert(), + "host.name": r.HostName.Convert(), + "host.type": r.HostType.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go b/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go new file mode 100644 index 000000000000..1532bd376567 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/ecs/config.go @@ -0,0 +1,86 @@ +package ecs + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "ecs" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + AwsEcsClusterArn: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsLaunchtype: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsTaskArn: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsTaskFamily: rac.ResourceAttributeConfig{Enabled: true}, + AwsEcsTaskRevision: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogGroupArns: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogGroupNames: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogStreamArns: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogStreamNames: rac.ResourceAttributeConfig{Enabled: true}, + CloudAccountID: rac.ResourceAttributeConfig{Enabled: true}, + CloudAvailabilityZone: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args *Config) Convert() map[string]interface{} { + if args == nil { + return nil + } + + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for ecs resource attributes. 
+type ResourceAttributesConfig struct { + AwsEcsClusterArn rac.ResourceAttributeConfig `river:"aws.ecs.cluster.arn,block,optional"` + AwsEcsLaunchtype rac.ResourceAttributeConfig `river:"aws.ecs.launchtype,block,optional"` + AwsEcsTaskArn rac.ResourceAttributeConfig `river:"aws.ecs.task.arn,block,optional"` + AwsEcsTaskFamily rac.ResourceAttributeConfig `river:"aws.ecs.task.family,block,optional"` + AwsEcsTaskRevision rac.ResourceAttributeConfig `river:"aws.ecs.task.revision,block,optional"` + AwsLogGroupArns rac.ResourceAttributeConfig `river:"aws.log.group.arns,block,optional"` + AwsLogGroupNames rac.ResourceAttributeConfig `river:"aws.log.group.names,block,optional"` + AwsLogStreamArns rac.ResourceAttributeConfig `river:"aws.log.stream.arns,block,optional"` + AwsLogStreamNames rac.ResourceAttributeConfig `river:"aws.log.stream.names,block,optional"` + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "aws.ecs.cluster.arn": r.AwsEcsClusterArn.Convert(), + "aws.ecs.launchtype": r.AwsEcsLaunchtype.Convert(), + "aws.ecs.task.arn": r.AwsEcsTaskArn.Convert(), + "aws.ecs.task.family": r.AwsEcsTaskFamily.Convert(), + "aws.ecs.task.revision": r.AwsEcsTaskRevision.Convert(), + "aws.log.group.arns": r.AwsLogGroupArns.Convert(), + "aws.log.group.names": r.AwsLogGroupNames.Convert(), + "aws.log.stream.arns": r.AwsLogStreamArns.Convert(), + "aws.log.stream.names": r.AwsLogStreamNames.Convert(), + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.availability_zone": r.CloudAvailabilityZone.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go b/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go new file mode 100644 index 000000000000..6290180b3086 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/eks/config.go @@ -0,0 +1,46 @@ +package eks + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "eks" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for eks resource attributes. 
+type ResourceAttributesConfig struct { + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go b/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go new file mode 100644 index 000000000000..dd670372cee7 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk/config.go @@ -0,0 +1,55 @@ +package elasticbeanstalk + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "elasticbeanstalk" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + DeploymentEnvironment: rac.ResourceAttributeConfig{Enabled: true}, + ServiceInstanceID: rac.ResourceAttributeConfig{Enabled: true}, + ServiceVersion: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for elastic_beanstalk resource attributes. 
+type ResourceAttributesConfig struct { + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + DeploymentEnvironment rac.ResourceAttributeConfig `river:"deployment.environment,block,optional"` + ServiceInstanceID rac.ResourceAttributeConfig `river:"service.instance.id,block,optional"` + ServiceVersion rac.ResourceAttributeConfig `river:"service.version,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "deployment.environment": r.DeploymentEnvironment.Convert(), + "service.instance.id": r.ServiceInstanceID.Convert(), + "service.version": r.ServiceVersion.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go b/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go new file mode 100644 index 000000000000..19a4cc7b4e80 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/aws/lambda/config.go @@ -0,0 +1,67 @@ +package lambda + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "lambda" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + AwsLogGroupNames: rac.ResourceAttributeConfig{Enabled: true}, + AwsLogStreamNames: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + FaasInstance: rac.ResourceAttributeConfig{Enabled: true}, + FaasMaxMemory: rac.ResourceAttributeConfig{Enabled: true}, + FaasName: rac.ResourceAttributeConfig{Enabled: true}, + FaasVersion: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for lambda resource attributes. 
+type ResourceAttributesConfig struct { + AwsLogGroupNames rac.ResourceAttributeConfig `river:"aws.log.group.names,block,optional"` + AwsLogStreamNames rac.ResourceAttributeConfig `river:"aws.log.stream.names,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + FaasInstance rac.ResourceAttributeConfig `river:"faas.instance,block,optional"` + FaasMaxMemory rac.ResourceAttributeConfig `river:"faas.max_memory,block,optional"` + FaasName rac.ResourceAttributeConfig `river:"faas.name,block,optional"` + FaasVersion rac.ResourceAttributeConfig `river:"faas.version,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "aws.log.group.names": r.AwsLogGroupNames.Convert(), + "aws.log.stream.names": r.AwsLogStreamNames.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "faas.instance": r.FaasInstance.Convert(), + "faas.max_memory": r.FaasMaxMemory.Convert(), + "faas.name": r.FaasName.Convert(), + "faas.version": r.FaasVersion.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go b/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go new file mode 100644 index 000000000000..4501c4e33a6f --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/azure/aks/config.go @@ -0,0 +1,46 @@ +package aks + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "aks" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for aks resource attributes. 
+type ResourceAttributesConfig struct { + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/azure/config.go b/component/otelcol/processor/resourcedetection/internal/azure/config.go new file mode 100644 index 000000000000..05e612d1d2d0 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/azure/config.go @@ -0,0 +1,70 @@ +package azure + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "azure" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + AzureResourcegroupName: rac.ResourceAttributeConfig{Enabled: true}, + AzureVMName: rac.ResourceAttributeConfig{Enabled: true}, + AzureVMScalesetName: rac.ResourceAttributeConfig{Enabled: true}, + AzureVMSize: rac.ResourceAttributeConfig{Enabled: true}, + CloudAccountID: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + HostID: rac.ResourceAttributeConfig{Enabled: true}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for azure resource attributes. 
+type ResourceAttributesConfig struct { + AzureResourcegroupName rac.ResourceAttributeConfig `river:"azure.resourcegroup.name,block,optional"` + AzureVMName rac.ResourceAttributeConfig `river:"azure.vm.name,block,optional"` + AzureVMScalesetName rac.ResourceAttributeConfig `river:"azure.vm.scaleset.name,block,optional"` + AzureVMSize rac.ResourceAttributeConfig `river:"azure.vm.size,block,optional"` + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "azure.resourcegroup.name": r.AzureResourcegroupName.Convert(), + "azure.vm.name": r.AzureVMName.Convert(), + "azure.vm.scaleset.name": r.AzureVMScalesetName.Convert(), + "azure.vm.size": r.AzureVMSize.Convert(), + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/consul/config.go b/component/otelcol/processor/resourcedetection/internal/consul/config.go new file mode 100644 index 000000000000..4cc2e9b5beb3 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/consul/config.go @@ -0,0 +1,94 @@ +package consul + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" + "github.com/grafana/river/rivertypes" + "go.opentelemetry.io/collector/config/configopaque" +) + +const Name = "consul" + +// The struct requires no user-specified fields by default as consul agent's default +// configuration will be provided to the API client. +// See `consul.go#NewDetector` for more information. +type Config struct { + // Address is the address of the Consul server + Address string `river:"address,attr,optional"` + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string `river:"datacenter,attr,optional"` + + // Token is used to provide a per-request ACL token which overrides the + // agent's default (empty) token. Token is only required if + // [Consul's ACL System](https://www.consul.io/docs/security/acl/acl-system) + // is enabled. + Token rivertypes.Secret `river:"token,attr,optional"` + + // TokenFile is not necessary in River because users can use the local.file + // Flow component instead. + // + // TokenFile string `river:"token_file"` + + // Namespace is the name of the namespace to send along for the request + // when no other Namespace is present in the QueryOptions + Namespace string `river:"namespace,attr,optional"` + + // Allowlist of [Consul Metadata](https://www.consul.io/docs/agent/options#node_meta) + // keys to use as resource attributes. 
+ MetaLabels []string `river:"meta,attr,optional"` + + // ResourceAttributes configuration for Consul detector + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + HostID: rac.ResourceAttributeConfig{Enabled: true}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + //TODO(ptodev): Change the OTel Collector's "meta" param to be a slice instead of a map. + var metaLabels map[string]string + if args.MetaLabels != nil { + metaLabels = make(map[string]string, len(args.MetaLabels)) + for _, label := range args.MetaLabels { + metaLabels[label] = "" + } + } + + return map[string]interface{}{ + "address": args.Address, + "datacenter": args.Datacenter, + "token": configopaque.String(args.Token), + "namespace": args.Namespace, + "meta": metaLabels, + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for consul resource attributes. +type ResourceAttributesConfig struct { + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` +} + +func (r *ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.region": r.CloudRegion.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/docker/config.go b/component/otelcol/processor/resourcedetection/internal/docker/config.go new file mode 100644 index 000000000000..f8c1bdc39b82 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/docker/config.go @@ -0,0 +1,46 @@ +package docker + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "docker" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + HostName: rac.ResourceAttributeConfig{Enabled: true}, + OsType: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for docker resource attributes. 
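+//
+// A minimal River sketch toggling the two supported attributes:
+//
+//	docker {
+//		resource_attributes {
+//			host.name { enabled = true }
+//			os.type { enabled = false }
+//		}
+//	}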
+type ResourceAttributesConfig struct { + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + OsType rac.ResourceAttributeConfig `river:"os.type,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "host.name": r.HostName.Convert(), + "os.type": r.OsType.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/gcp/config.go b/component/otelcol/processor/resourcedetection/internal/gcp/config.go new file mode 100644 index 000000000000..76395828a97c --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/gcp/config.go @@ -0,0 +1,91 @@ +package gcp + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "gcp" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. +var DefaultArguments = Config{ + ResourceAttributes: ResourceAttributesConfig{ + CloudAccountID: rac.ResourceAttributeConfig{Enabled: true}, + CloudAvailabilityZone: rac.ResourceAttributeConfig{Enabled: true}, + CloudPlatform: rac.ResourceAttributeConfig{Enabled: true}, + CloudProvider: rac.ResourceAttributeConfig{Enabled: true}, + CloudRegion: rac.ResourceAttributeConfig{Enabled: true}, + FaasID: rac.ResourceAttributeConfig{Enabled: true}, + FaasInstance: rac.ResourceAttributeConfig{Enabled: true}, + FaasName: rac.ResourceAttributeConfig{Enabled: true}, + FaasVersion: rac.ResourceAttributeConfig{Enabled: true}, + GcpCloudRunJobExecution: rac.ResourceAttributeConfig{Enabled: true}, + GcpCloudRunJobTaskIndex: rac.ResourceAttributeConfig{Enabled: true}, + GcpGceInstanceHostname: rac.ResourceAttributeConfig{Enabled: false}, + GcpGceInstanceName: rac.ResourceAttributeConfig{Enabled: false}, + HostID: rac.ResourceAttributeConfig{Enabled: true}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + HostType: rac.ResourceAttributeConfig{Enabled: true}, + K8sClusterName: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (args *Config) SetToDefault() { + *args = DefaultArguments +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for gcp resource attributes. 
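+//
+// A River sketch (hypothetical) disabling one of the attributes below:
+//
+//	gcp {
+//		resource_attributes {
+//			faas.id { enabled = false }
+//		}
+//	}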
+type ResourceAttributesConfig struct { + CloudAccountID rac.ResourceAttributeConfig `river:"cloud.account.id,block,optional"` + CloudAvailabilityZone rac.ResourceAttributeConfig `river:"cloud.availability_zone,block,optional"` + CloudPlatform rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"` + CloudProvider rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"` + CloudRegion rac.ResourceAttributeConfig `river:"cloud.region,block,optional"` + FaasID rac.ResourceAttributeConfig `river:"faas.id,block,optional"` + FaasInstance rac.ResourceAttributeConfig `river:"faas.instance,block,optional"` + FaasName rac.ResourceAttributeConfig `river:"faas.name,block,optional"` + FaasVersion rac.ResourceAttributeConfig `river:"faas.version,block,optional"` + GcpCloudRunJobExecution rac.ResourceAttributeConfig `river:"gcp.cloud_run.job.execution,block,optional"` + GcpCloudRunJobTaskIndex rac.ResourceAttributeConfig `river:"gcp.cloud_run.job.task_index,block,optional"` + GcpGceInstanceHostname rac.ResourceAttributeConfig `river:"gcp.gce.instance.hostname,block,optional"` + GcpGceInstanceName rac.ResourceAttributeConfig `river:"gcp.gce.instance.name,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + HostType rac.ResourceAttributeConfig `river:"host.type,block,optional"` + K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "cloud.account.id": r.CloudAccountID.Convert(), + "cloud.availability_zone": r.CloudAvailabilityZone.Convert(), + "cloud.platform": r.CloudPlatform.Convert(), + "cloud.provider": r.CloudProvider.Convert(), + "cloud.region": r.CloudRegion.Convert(), + "faas.id": r.FaasID.Convert(), + "faas.instance": r.FaasInstance.Convert(), + "faas.name": r.FaasName.Convert(), + "faas.version": r.FaasVersion.Convert(), + "gcp.cloud_run.job.execution": r.GcpCloudRunJobExecution.Convert(), + "gcp.cloud_run.job.task_index": r.GcpCloudRunJobTaskIndex.Convert(), + "gcp.gce.instance.hostname": r.GcpGceInstanceHostname.Convert(), + "gcp.gce.instance.name": r.GcpGceInstanceName.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + "host.type": r.HostType.Convert(), + "k8s.cluster.name": r.K8sClusterName.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/internal/heroku/config.go b/component/otelcol/processor/resourcedetection/internal/heroku/config.go new file mode 100644 index 000000000000..6e7681269abb --- /dev/null +++ b/component/otelcol/processor/resourcedetection/internal/heroku/config.go @@ -0,0 +1,64 @@ +package heroku + +import ( + rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config" + "github.com/grafana/river" +) + +const Name = "heroku" + +type Config struct { + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +// DefaultArguments holds default settings for Config. 
+var DefaultArguments = Config{
+	ResourceAttributes: ResourceAttributesConfig{
+		CloudProvider:                  rac.ResourceAttributeConfig{Enabled: true},
+		HerokuAppID:                    rac.ResourceAttributeConfig{Enabled: true},
+		HerokuDynoID:                   rac.ResourceAttributeConfig{Enabled: true},
+		HerokuReleaseCommit:            rac.ResourceAttributeConfig{Enabled: true},
+		HerokuReleaseCreationTimestamp: rac.ResourceAttributeConfig{Enabled: true},
+		ServiceInstanceID:              rac.ResourceAttributeConfig{Enabled: true},
+		ServiceName:                    rac.ResourceAttributeConfig{Enabled: true},
+		ServiceVersion:                 rac.ResourceAttributeConfig{Enabled: true},
+	},
+}
+
+var _ river.Defaulter = (*Config)(nil)
+
+// SetToDefault implements river.Defaulter.
+func (args *Config) SetToDefault() {
+	*args = DefaultArguments
+}
+
+func (args Config) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		"resource_attributes": args.ResourceAttributes.Convert(),
+	}
+}
+
+// ResourceAttributesConfig provides config for heroku resource attributes.
+type ResourceAttributesConfig struct {
+	CloudProvider                  rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"`
+	HerokuAppID                    rac.ResourceAttributeConfig `river:"heroku.app.id,block,optional"`
+	HerokuDynoID                   rac.ResourceAttributeConfig `river:"heroku.dyno.id,block,optional"`
+	HerokuReleaseCommit            rac.ResourceAttributeConfig `river:"heroku.release.commit,block,optional"`
+	HerokuReleaseCreationTimestamp rac.ResourceAttributeConfig `river:"heroku.release.creation_timestamp,block,optional"`
+	ServiceInstanceID              rac.ResourceAttributeConfig `river:"service.instance.id,block,optional"`
+	ServiceName                    rac.ResourceAttributeConfig `river:"service.name,block,optional"`
+	ServiceVersion                 rac.ResourceAttributeConfig `river:"service.version,block,optional"`
+}
+
+func (r ResourceAttributesConfig) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		"cloud.provider":                    r.CloudProvider.Convert(),
+		"heroku.app.id":                     r.HerokuAppID.Convert(),
+		"heroku.dyno.id":                    r.HerokuDynoID.Convert(),
+		"heroku.release.commit":             r.HerokuReleaseCommit.Convert(),
+		"heroku.release.creation_timestamp": r.HerokuReleaseCreationTimestamp.Convert(),
+		"service.instance.id":               r.ServiceInstanceID.Convert(),
+		"service.name":                      r.ServiceName.Convert(),
+		"service.version":                   r.ServiceVersion.Convert(),
+	}
+}
diff --git a/component/otelcol/processor/resourcedetection/internal/k8snode/config.go b/component/otelcol/processor/resourcedetection/internal/k8snode/config.go
new file mode 100644
index 000000000000..8d47362eecb6
--- /dev/null
+++ b/component/otelcol/processor/resourcedetection/internal/k8snode/config.go
@@ -0,0 +1,75 @@
+package k8snode
+
+import (
+	"github.com/grafana/agent/component/otelcol"
+	rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config"
+	"github.com/grafana/river"
+)
+
+const Name = "kubernetes_node"
+
+type Config struct {
+	KubernetesAPIConfig otelcol.KubernetesAPIConfig `river:",squash"`
+	// NodeFromEnvVar can be used to extract the node name from an environment
+	// variable. The value must be the name of the environment variable.
+	// This is useful when the node on which the Agent runs cannot be
+	// predicted. In such cases, the Kubernetes downward API can be used to
+	// add the node name to each pod as an environment variable. The detector
+	// can then read this value and identify the node.
+	//
+	// For example, the node name can be passed to each agent with the downward API as follows:
+	//
+	// env:
+	//   - name: K8S_NODE_NAME
+	//     valueFrom:
+	//       fieldRef:
+	//         fieldPath: spec.nodeName
+	//
+	// Then the NodeFromEnvVar field can be set to `K8S_NODE_NAME` so that the detector
+	// resolves the node that the agent is running on.
+	//
+	// More on the downward API here: https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/
+	NodeFromEnvVar     string                   `river:"node_from_env_var,attr,optional"`
+	ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"`
+}
+
+var DefaultArguments = Config{
+	KubernetesAPIConfig: otelcol.KubernetesAPIConfig{
+		AuthType: otelcol.KubernetesAPIConfig_AuthType_None,
+	},
+	NodeFromEnvVar: "K8S_NODE_NAME",
+	ResourceAttributes: ResourceAttributesConfig{
+		K8sNodeName: rac.ResourceAttributeConfig{Enabled: true},
+		K8sNodeUID:  rac.ResourceAttributeConfig{Enabled: true},
+	},
+}
+
+var _ river.Defaulter = (*Config)(nil)
+
+// SetToDefault implements river.Defaulter.
+func (c *Config) SetToDefault() {
+	*c = DefaultArguments
+}
+
+func (args Config) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		//TODO: K8sAPIConfig is squashed - is there a better way to "convert" it?
+		"auth_type":           args.KubernetesAPIConfig.AuthType,
+		"context":             args.KubernetesAPIConfig.Context,
+		"node_from_env_var":   args.NodeFromEnvVar,
+		"resource_attributes": args.ResourceAttributes.Convert(),
+	}
+}
+
+// ResourceAttributesConfig provides config for k8snode resource attributes.
+type ResourceAttributesConfig struct {
+	K8sNodeName rac.ResourceAttributeConfig `river:"k8s.node.name,block,optional"`
+	K8sNodeUID  rac.ResourceAttributeConfig `river:"k8s.node.uid,block,optional"`
+}
+
+func (r ResourceAttributesConfig) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		"k8s.node.name": r.K8sNodeName.Convert(),
+		"k8s.node.uid":  r.K8sNodeUID.Convert(),
+	}
+}
diff --git a/component/otelcol/processor/resourcedetection/internal/openshift/config.go b/component/otelcol/processor/resourcedetection/internal/openshift/config.go
new file mode 100644
index 000000000000..362cd9bff459
--- /dev/null
+++ b/component/otelcol/processor/resourcedetection/internal/openshift/config.go
@@ -0,0 +1,68 @@
+package openshift
+
+import (
+	"github.com/grafana/agent/component/otelcol"
+	rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config"
+	"github.com/grafana/river"
+)
+
+const Name = "openshift"
+
+// Config can contain user-specified inputs to overwrite default values.
+// See `openshift.go#NewDetector` for more information.
+type Config struct {
+	// Address is the address of the OpenShift API server.
+	Address string `river:"address,attr,optional"`
+
+	// Token is used to authenticate against the OpenShift API server.
+	Token string `river:"token,attr,optional"`
+
+	// TLSSettings contains TLS configurations that are specific to the client
+	// connection used to communicate with the OpenShift API.
+	TLSSettings otelcol.TLSClientArguments `river:"tls,block,optional"`
+
+	ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"`
+}
+
+// DefaultArguments holds default settings for Config.
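+//
+// All four resource attributes below are enabled by default. A River sketch
+// (hypothetical) that disables one of them:
+//
+//	openshift {
+//		resource_attributes {
+//			k8s.cluster.name { enabled = false }
+//		}
+//	}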
+var DefaultArguments = Config{
+	ResourceAttributes: ResourceAttributesConfig{
+		CloudPlatform:  rac.ResourceAttributeConfig{Enabled: true},
+		CloudProvider:  rac.ResourceAttributeConfig{Enabled: true},
+		CloudRegion:    rac.ResourceAttributeConfig{Enabled: true},
+		K8sClusterName: rac.ResourceAttributeConfig{Enabled: true},
+	},
+}
+
+var _ river.Defaulter = (*Config)(nil)
+
+// SetToDefault implements river.Defaulter.
+func (args *Config) SetToDefault() {
+	*args = DefaultArguments
+}
+
+func (args Config) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		"address":             args.Address,
+		"token":               args.Token,
+		"tls":                 args.TLSSettings.Convert(),
+		"resource_attributes": args.ResourceAttributes.Convert(),
+	}
+}
+
+// ResourceAttributesConfig provides config for openshift resource attributes.
+type ResourceAttributesConfig struct {
+	CloudPlatform  rac.ResourceAttributeConfig `river:"cloud.platform,block,optional"`
+	CloudProvider  rac.ResourceAttributeConfig `river:"cloud.provider,block,optional"`
+	CloudRegion    rac.ResourceAttributeConfig `river:"cloud.region,block,optional"`
+	K8sClusterName rac.ResourceAttributeConfig `river:"k8s.cluster.name,block,optional"`
+}
+
+func (r ResourceAttributesConfig) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		"cloud.platform":   r.CloudPlatform.Convert(),
+		"cloud.provider":   r.CloudProvider.Convert(),
+		"cloud.region":     r.CloudRegion.Convert(),
+		"k8s.cluster.name": r.K8sClusterName.Convert(),
+	}
+}
diff --git a/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go b/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go
new file mode 100644
index 000000000000..ff5540a2f539
--- /dev/null
+++ b/component/otelcol/processor/resourcedetection/internal/resource_attribute_config/resource_attribute_config.go
@@ -0,0 +1,12 @@
+package resource_attribute_config
+
+// ResourceAttributeConfig configures whether a resource attribute is enabled.
+type ResourceAttributeConfig struct {
+	Enabled bool `river:"enabled,attr"`
+}
+
+func (r ResourceAttributeConfig) Convert() map[string]interface{} {
+	return map[string]interface{}{
+		"enabled": r.Enabled,
+	}
+}
diff --git a/component/otelcol/processor/resourcedetection/internal/system/config.go b/component/otelcol/processor/resourcedetection/internal/system/config.go
new file mode 100644
index 000000000000..82e25cb45e97
--- /dev/null
+++ b/component/otelcol/processor/resourcedetection/internal/system/config.go
@@ -0,0 +1,95 @@
+package system
+
+import (
+	"fmt"
+
+	rac "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/resource_attribute_config"
+	"github.com/grafana/river"
+)
+
+const Name = "system"
+
+// Config defines user-specified configurations unique to the system detector.
+type Config struct {
+	// HostnameSources is a priority list of sources from which the hostname
+	// is fetched. If fetching the hostname from one source fails, the next
+	// source in the list is tried.
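+	//
+	// For example, to try the OS hostname first and fall back to DNS
+	// (a sketch; valid sources are "os", "dns", "cname" and "lookup"):
+	//
+	//	system {
+	//		hostname_sources = ["os", "dns"]
+	//	}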
+ HostnameSources []string `river:"hostname_sources,attr,optional"` + + ResourceAttributes ResourceAttributesConfig `river:"resource_attributes,block,optional"` +} + +var DefaultArguments = Config{ + HostnameSources: []string{"dns", "os"}, + ResourceAttributes: ResourceAttributesConfig{ + HostArch: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUCacheL2Size: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUFamily: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUModelID: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUModelName: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUStepping: rac.ResourceAttributeConfig{Enabled: false}, + HostCPUVendorID: rac.ResourceAttributeConfig{Enabled: false}, + HostID: rac.ResourceAttributeConfig{Enabled: false}, + HostName: rac.ResourceAttributeConfig{Enabled: true}, + OsDescription: rac.ResourceAttributeConfig{Enabled: false}, + OsType: rac.ResourceAttributeConfig{Enabled: true}, + }, +} + +var _ river.Defaulter = (*Config)(nil) + +// SetToDefault implements river.Defaulter. +func (c *Config) SetToDefault() { + *c = DefaultArguments +} + +// Validate config +func (cfg *Config) Validate() error { + for _, hostnameSource := range cfg.HostnameSources { + switch hostnameSource { + case "os", "dns", "cname", "lookup": + // Valid option - nothing to do + default: + return fmt.Errorf("invalid hostname source: %s", hostnameSource) + } + } + return nil +} + +func (args Config) Convert() map[string]interface{} { + return map[string]interface{}{ + "hostname_sources": args.HostnameSources, + "resource_attributes": args.ResourceAttributes.Convert(), + } +} + +// ResourceAttributesConfig provides config for system resource attributes. +type ResourceAttributesConfig struct { + HostArch rac.ResourceAttributeConfig `river:"host.arch,block,optional"` + HostCPUCacheL2Size rac.ResourceAttributeConfig `river:"host.cpu.cache.l2.size,block,optional"` + HostCPUFamily rac.ResourceAttributeConfig `river:"host.cpu.family,block,optional"` + HostCPUModelID rac.ResourceAttributeConfig `river:"host.cpu.model.id,block,optional"` + HostCPUModelName rac.ResourceAttributeConfig `river:"host.cpu.model.name,block,optional"` + HostCPUStepping rac.ResourceAttributeConfig `river:"host.cpu.stepping,block,optional"` + HostCPUVendorID rac.ResourceAttributeConfig `river:"host.cpu.vendor.id,block,optional"` + HostID rac.ResourceAttributeConfig `river:"host.id,block,optional"` + HostName rac.ResourceAttributeConfig `river:"host.name,block,optional"` + OsDescription rac.ResourceAttributeConfig `river:"os.description,block,optional"` + OsType rac.ResourceAttributeConfig `river:"os.type,block,optional"` +} + +func (r ResourceAttributesConfig) Convert() map[string]interface{} { + return map[string]interface{}{ + "host.arch": r.HostArch.Convert(), + "host.cpu.cache.l2.size": r.HostCPUCacheL2Size.Convert(), + "host.cpu.family": r.HostCPUFamily.Convert(), + "host.cpu.model.id": r.HostCPUModelID.Convert(), + "host.cpu.model.name": r.HostCPUModelName.Convert(), + "host.cpu.stepping": r.HostCPUStepping.Convert(), + "host.cpu.vendor.id": r.HostCPUVendorID.Convert(), + "host.id": r.HostID.Convert(), + "host.name": r.HostName.Convert(), + "os.description": r.OsDescription.Convert(), + "os.type": r.OsType.Convert(), + } +} diff --git a/component/otelcol/processor/resourcedetection/resourcedetection.go b/component/otelcol/processor/resourcedetection/resourcedetection.go new file mode 100644 index 000000000000..806d72c9d2e5 --- /dev/null +++ 
b/component/otelcol/processor/resourcedetection/resourcedetection.go @@ -0,0 +1,247 @@ +package resourcedetection + +import ( + "fmt" + "time" + + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/otelcol" + "github.com/grafana/agent/component/otelcol/processor" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ec2" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ecs" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/eks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/lambda" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure/aks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/consul" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/docker" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/gcp" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/heroku" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/k8snode" + kubernetes_node "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/k8snode" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/openshift" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/system" + "github.com/grafana/river" + "github.com/mitchellh/mapstructure" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" + otelcomponent "go.opentelemetry.io/collector/component" + otelextension "go.opentelemetry.io/collector/extension" +) + +func init() { + component.Register(component.Registration{ + Name: "otelcol.processor.resourcedetection", + Args: Arguments{}, + Exports: otelcol.ConsumerExports{}, + + Build: func(opts component.Options, args component.Arguments) (component.Component, error) { + fact := resourcedetectionprocessor.NewFactory() + return processor.New(opts, fact, args.(Arguments)) + }, + }) +} + +// Arguments configures the otelcol.processor.resourcedetection component. +type Arguments struct { + // Detectors is an ordered list of named detectors that should be + // run to attempt to detect resource information. + Detectors []string `river:"detectors,attr,optional"` + + // Override indicates whether any existing resource attributes + // should be overridden or preserved. Defaults to true. + Override bool `river:"override,attr,optional"` + + // DetectorConfig is a list of settings specific to all detectors + DetectorConfig DetectorConfig `river:",squash"` + + // HTTP client settings for the detector + // Timeout default is 5s + Timeout time.Duration `river:"timeout,attr,optional"` + // Client otelcol.HTTPClientArguments `river:",squash"` + //TODO: Uncomment this later, and remove Timeout? + // Can we just get away with a timeout, or do we need all the http client settings? + // It seems that HTTP client settings are only used in the ec2 detection via ClientFromContext. + // This seems like a very niche use case, so for now I won't implement it in the Agent. + // If we do implement it in the Agent, I am not sure how to document the HTTP client settings. 
+	// We'd have to mention that they're only for a very specific use case.
+
+	// Output configures where to send processed data. Required.
+	Output *otelcol.ConsumerArguments `river:"output,block"`
+}
+
+// DetectorConfig contains user-specified configurations unique to all individual detectors.
+type DetectorConfig struct {
+	// EC2Config contains user-specified configurations for the EC2 detector
+	EC2Config ec2.Config `river:"ec2,block,optional"`
+
+	// ECSConfig contains user-specified configurations for the ECS detector
+	ECSConfig ecs.Config `river:"ecs,block,optional"`
+
+	// EKSConfig contains user-specified configurations for the EKS detector
+	EKSConfig eks.Config `river:"eks,block,optional"`
+
+	// ElasticbeanstalkConfig contains user-specified configurations for the elasticbeanstalk detector
+	ElasticbeanstalkConfig elasticbeanstalk.Config `river:"elasticbeanstalk,block,optional"`
+
+	// LambdaConfig contains user-specified configurations for the lambda detector
+	LambdaConfig lambda.Config `river:"lambda,block,optional"`
+
+	// AzureConfig contains user-specified configurations for the azure detector
+	AzureConfig azure.Config `river:"azure,block,optional"`
+
+	// AksConfig contains user-specified configurations for the aks detector
+	AksConfig aks.Config `river:"aks,block,optional"`
+
+	// ConsulConfig contains user-specified configurations for the Consul detector
+	ConsulConfig consul.Config `river:"consul,block,optional"`
+
+	// DockerConfig contains user-specified configurations for the docker detector
+	DockerConfig docker.Config `river:"docker,block,optional"`
+
+	// GcpConfig contains user-specified configurations for the gcp detector
+	GcpConfig gcp.Config `river:"gcp,block,optional"`
+
+	// HerokuConfig contains user-specified configurations for the heroku detector
+	HerokuConfig heroku.Config `river:"heroku,block,optional"`
+
+	// SystemConfig contains user-specified configurations for the system detector
+	SystemConfig system.Config `river:"system,block,optional"`
+
+	// OpenShiftConfig contains user-specified configurations for the OpenShift detector
+	OpenShiftConfig openshift.Config `river:"openshift,block,optional"`
+
+	// KubernetesNodeConfig contains user-specified configurations for the kubernetes_node detector
+	KubernetesNodeConfig kubernetes_node.Config `river:"kubernetes_node,block,optional"`
+}
+
+var (
+	_ processor.Arguments = Arguments{}
+	_ river.Validator     = (*Arguments)(nil)
+	_ river.Defaulter     = (*Arguments)(nil)
+)
+
+// DefaultArguments holds default settings for Arguments.
+var DefaultArguments = Arguments{
+	Detectors: []string{"env"},
+	Override:  true,
+	Timeout:   5 * time.Second,
+	DetectorConfig: DetectorConfig{
+		EC2Config:              ec2.DefaultArguments,
+		ECSConfig:              ecs.DefaultArguments,
+		EKSConfig:              eks.DefaultArguments,
+		ElasticbeanstalkConfig: elasticbeanstalk.DefaultArguments,
+		LambdaConfig:           lambda.DefaultArguments,
+		AzureConfig:            azure.DefaultArguments,
+		AksConfig:              aks.DefaultArguments,
+		ConsulConfig:           consul.DefaultArguments,
+		DockerConfig:           docker.DefaultArguments,
+		GcpConfig:              gcp.DefaultArguments,
+		HerokuConfig:           heroku.DefaultArguments,
+		SystemConfig:           system.DefaultArguments,
+		OpenShiftConfig:        openshift.DefaultArguments,
+		KubernetesNodeConfig:   kubernetes_node.DefaultArguments,
+	},
+}
+
+// SetToDefault implements river.Defaulter.
+func (args *Arguments) SetToDefault() {
+	*args = DefaultArguments
+}
+
+// Validate implements river.Validator.
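+//
+// For example, this detectors list passes validation (a sketch):
+//
+//	detectors = ["env", "system", "docker"]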
+func (args *Arguments) Validate() error { + if len(args.Detectors) == 0 { + return fmt.Errorf("at least one detector must be specified") + } + + for _, detector := range args.Detectors { + switch detector { + case "env", + ec2.Name, + ecs.Name, + eks.Name, + elasticbeanstalk.Name, + lambda.Name, + azure.Name, + aks.Name, + consul.Name, + docker.Name, + gcp.Name, + heroku.Name, + system.Name, + openshift.Name, + k8snode.Name: + // Valid option - nothing to do + default: + return fmt.Errorf("invalid detector: %s", detector) + } + } + + return nil +} + +func (args Arguments) ConvertDetectors() []string { + if args.Detectors == nil { + return nil + } + + res := make([]string, 0, len(args.Detectors)) + for _, detector := range args.Detectors { + switch detector { + case k8snode.Name: + res = append(res, "k8snode") + default: + res = append(res, detector) + } + } + return res +} + +// Convert implements processor.Arguments. +func (args Arguments) Convert() (otelcomponent.Config, error) { + input := make(map[string]interface{}) + + input["detectors"] = args.ConvertDetectors() + input["override"] = args.Override + input["timeout"] = args.Timeout + + input["ec2"] = args.DetectorConfig.EC2Config.Convert() + input["ecs"] = args.DetectorConfig.ECSConfig.Convert() + input["eks"] = args.DetectorConfig.EKSConfig.Convert() + input["elasticbeanstalk"] = args.DetectorConfig.ElasticbeanstalkConfig.Convert() + input["lambda"] = args.DetectorConfig.LambdaConfig.Convert() + input["azure"] = args.DetectorConfig.AzureConfig.Convert() + input["aks"] = args.DetectorConfig.AksConfig.Convert() + input["consul"] = args.DetectorConfig.ConsulConfig.Convert() + input["docker"] = args.DetectorConfig.DockerConfig.Convert() + input["gcp"] = args.DetectorConfig.GcpConfig.Convert() + input["heroku"] = args.DetectorConfig.HerokuConfig.Convert() + input["system"] = args.DetectorConfig.SystemConfig.Convert() + input["openshift"] = args.DetectorConfig.OpenShiftConfig.Convert() + input["k8snode"] = args.DetectorConfig.KubernetesNodeConfig.Convert() + + var result resourcedetectionprocessor.Config + err := mapstructure.Decode(input, &result) + + if err != nil { + return nil, err + } + + return &result, nil +} + +// Extensions implements processor.Arguments. +func (args Arguments) Extensions() map[otelcomponent.ID]otelextension.Extension { + return nil +} + +// Exporters implements processor.Arguments. +func (args Arguments) Exporters() map[otelcomponent.DataType]map[otelcomponent.ID]otelcomponent.Component { + return nil +} + +// NextConsumers implements processor.Arguments. 
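+//
+// A full component sketch in River (the component label and the exporter
+// reference are hypothetical):
+//
+//	otelcol.processor.resourcedetection "default" {
+//		detectors = ["env", "system"]
+//		output {
+//			traces = [otelcol.exporter.otlp.default.input]
+//		}
+//	}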
+func (args Arguments) NextConsumers() *otelcol.ConsumerArguments { + return args.Output +} diff --git a/component/otelcol/processor/resourcedetection/resourcedetection_test.go b/component/otelcol/processor/resourcedetection/resourcedetection_test.go new file mode 100644 index 000000000000..6fbbf0280e06 --- /dev/null +++ b/component/otelcol/processor/resourcedetection/resourcedetection_test.go @@ -0,0 +1,1527 @@ +package resourcedetection_test + +import ( + "testing" + "time" + + "github.com/grafana/agent/component/otelcol/processor/resourcedetection" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ec2" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/ecs" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/eks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/elasticbeanstalk" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/aws/lambda" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/azure/aks" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/consul" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/docker" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/gcp" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/heroku" + kubernetes_node "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/k8snode" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/openshift" + "github.com/grafana/agent/component/otelcol/processor/resourcedetection/internal/system" + "github.com/grafana/river" + "github.com/mitchellh/mapstructure" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor" + "github.com/stretchr/testify/require" +) + +func TestArguments_UnmarshalRiver(t *testing.T) { + tests := []struct { + testName string + cfg string + expected map[string]interface{} + errorMsg string + }{ + { + testName: "err_no_detector", + cfg: ` + detectors = [] + output {} + `, + errorMsg: "at least one detector must be specified", + }, + { + testName: "invalid_detector", + cfg: ` + detectors = ["non-existent-detector"] + output {} + `, + errorMsg: "invalid detector: non-existent-detector", + }, + { + testName: "invalid_detector_and_all_valid_ones", + cfg: ` + detectors = ["non-existent-detector2", "env", "ec2", "ecs", "eks", "elasticbeanstalk", "lambda", "azure", "aks", "consul", "docker", "gcp", "heroku", "system", "openshift", "kubernetes_node"] + output {} + `, + errorMsg: "invalid detector: non-existent-detector2", + }, + { + testName: "all_detectors_with_defaults", + cfg: ` + detectors = ["env", "ec2", "ecs", "eks", "elasticbeanstalk", "lambda", "azure", "aks", "consul", "docker", "gcp", "heroku", "system", "openshift", "kubernetes_node"] + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"env", "ec2", "ecs", "eks", "elasticbeanstalk", "lambda", "azure", "aks", "consul", "docker", "gcp", "heroku", "system", "openshift", "k8snode"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": 
elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "default_detector", + cfg: ` + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"env"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ec2_defaults", + cfg: ` + detectors = ["ec2"] + ec2 { + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ec2"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + "host.id": map[string]interface{}{"enabled": true}, + "host.image.id": map[string]interface{}{"enabled": true}, + "host.name": map[string]interface{}{"enabled": true}, + "host.type": map[string]interface{}{"enabled": true}, + }, + }, + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ec2_defaults_empty_resource_attributes", + cfg: ` + detectors = ["ec2"] + ec2 { + resource_attributes {} + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ec2"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + 
"host.id": map[string]interface{}{"enabled": true}, + "host.image.id": map[string]interface{}{"enabled": true}, + "host.name": map[string]interface{}{"enabled": true}, + "host.type": map[string]interface{}{"enabled": true}, + }, + }, + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ec2_explicit", + cfg: ` + detectors = ["ec2"] + ec2 { + tags = ["^tag1$", "^tag2$", "^label.*$"] + resource_attributes { + cloud.account.id { enabled = true } + cloud.availability_zone { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = true } + cloud.region { enabled = true } + host.id { enabled = true } + host.image.id { enabled = false } + host.name { enabled = false } + host.type { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ec2"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": map[string]interface{}{ + "tags": []string{"^tag1$", "^tag2$", "^label.*$"}, + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + "host.id": map[string]interface{}{"enabled": true}, + "host.image.id": map[string]interface{}{"enabled": false}, + "host.name": map[string]interface{}{"enabled": false}, + "host.type": map[string]interface{}{"enabled": false}, + }, + }, + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ecs_defaults", + cfg: ` + detectors = ["ecs"] + ecs { + resource_attributes { + aws.ecs.cluster.arn { enabled = true } + aws.ecs.launchtype { enabled = true } + aws.ecs.task.arn { enabled = true } + aws.ecs.task.family { enabled = true } + aws.ecs.task.revision { enabled = true } + aws.log.group.arns { enabled = true } + aws.log.group.names { enabled = false } + // aws.log.stream.arns { enabled = true } + // aws.log.stream.names { enabled = true } + // cloud.account.id { enabled = true } + // cloud.availability_zone { enabled = true } + // cloud.platform { enabled = true } + // cloud.provider { enabled = true } + // cloud.region { enabled = true } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ecs"}, + 
"timeout": 5 * time.Second, + "override": true, + "ecs": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "aws.ecs.cluster.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.launchtype": map[string]interface{}{"enabled": true}, + "aws.ecs.task.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.task.family": map[string]interface{}{"enabled": true}, + "aws.ecs.task.revision": map[string]interface{}{"enabled": true}, + "aws.log.group.arns": map[string]interface{}{"enabled": true}, + "aws.log.group.names": map[string]interface{}{"enabled": false}, + "aws.log.stream.arns": map[string]interface{}{"enabled": true}, + "aws.log.stream.names": map[string]interface{}{"enabled": true}, + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "ecs_explicit", + cfg: ` + detectors = ["ecs"] + ecs { + resource_attributes { + aws.ecs.cluster.arn { enabled = true } + aws.ecs.launchtype { enabled = true } + aws.ecs.task.arn { enabled = true } + aws.ecs.task.family { enabled = true } + aws.ecs.task.revision { enabled = true } + aws.log.group.arns { enabled = true } + aws.log.group.names { enabled = false } + // aws.log.stream.arns { enabled = true } + // aws.log.stream.names { enabled = true } + // cloud.account.id { enabled = true } + // cloud.availability_zone { enabled = true } + // cloud.platform { enabled = true } + // cloud.provider { enabled = true } + // cloud.region { enabled = true } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"ecs"}, + "timeout": 5 * time.Second, + "override": true, + "ecs": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "aws.ecs.cluster.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.launchtype": map[string]interface{}{"enabled": true}, + "aws.ecs.task.arn": map[string]interface{}{"enabled": true}, + "aws.ecs.task.family": map[string]interface{}{"enabled": true}, + "aws.ecs.task.revision": map[string]interface{}{"enabled": true}, + "aws.log.group.arns": map[string]interface{}{"enabled": true}, + "aws.log.group.names": map[string]interface{}{"enabled": false}, + "aws.log.stream.arns": map[string]interface{}{"enabled": true}, + "aws.log.stream.names": map[string]interface{}{"enabled": true}, + "cloud.account.id": map[string]interface{}{"enabled": true}, + "cloud.availability_zone": map[string]interface{}{"enabled": true}, + "cloud.platform": map[string]interface{}{"enabled": true}, + "cloud.provider": map[string]interface{}{"enabled": true}, + "cloud.region": map[string]interface{}{"enabled": true}, + }, + }, + "ec2": 
ec2.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "eks_defaults", + cfg: ` + detectors = ["eks"] + eks {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"eks"}, + "timeout": 5 * time.Second, + "override": true, + "eks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "eks_explicit", + cfg: ` + detectors = ["eks"] + eks { + resource_attributes { + cloud.platform { enabled = true } + cloud.provider { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"eks"}, + "timeout": 5 * time.Second, + "override": true, + "eks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "azure_defaults", + cfg: ` + detectors = ["azure"] + azure {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"azure"}, + "timeout": 5 * time.Second, + "override": true, + "azure": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "tags": []string{}, + "azure.resourcegroup.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.scaleset.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.size": map[string]interface{}{ + "enabled": true, + }, + "cloud.account.id": 
map[string]interface{}{ + "enabled": true, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": true, + }, + "host.id": map[string]interface{}{ + "enabled": true, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "azure_explicit", + cfg: ` + detectors = ["azure"] + azure { + resource_attributes { + azure.resourcegroup.name { enabled = true } + azure.vm.name { enabled = true } + azure.vm.scaleset.name { enabled = true } + azure.vm.size { enabled = true } + cloud.account.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"azure"}, + "timeout": 5 * time.Second, + "override": true, + "azure": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "tags": []string{}, + "azure.resourcegroup.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.scaleset.name": map[string]interface{}{ + "enabled": true, + }, + "azure.vm.size": map[string]interface{}{ + "enabled": true, + }, + "cloud.account.id": map[string]interface{}{ + "enabled": false, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": true, + }, + "host.id": map[string]interface{}{ + "enabled": true, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "aks_defaults", + cfg: ` + detectors = ["aks"] + aks {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"aks"}, + "timeout": 5 * time.Second, + "override": true, + "aks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": 
lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "aks_explicit", + cfg: ` + detectors = ["aks"] + aks { + resource_attributes { + cloud.platform { enabled = true } + cloud.provider { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"aks"}, + "timeout": 5 * time.Second, + "override": true, + "aks": map[string]interface{}{ + "tags": []string{}, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "gcp_defaults", + cfg: ` + detectors = ["gcp"] + gcp {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"gcp"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "gcp_explicit", + cfg: ` + detectors = ["gcp"] + gcp { + resource_attributes { + cloud.account.id { enabled = true } + cloud.availability_zone { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = true } + cloud.region { enabled = false } + faas.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"gcp"}, + "timeout": 5 * time.Second, + "override": true, + "gcp": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "cloud.account.id": map[string]interface{}{ + "enabled": true, + }, + "cloud.availability_zone": map[string]interface{}{ + "enabled": true, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "faas.id": map[string]interface{}{ + "enabled": false, + }, + "faas.instance": map[string]interface{}{ + "enabled": true, + }, + "faas.name": 
map[string]interface{}{ + "enabled": true, + }, + "faas.version": map[string]interface{}{ + "enabled": true, + }, + "gcp.cloud_run.job.execution": map[string]interface{}{ + "enabled": true, + }, + "gcp.cloud_run.job.task_index": map[string]interface{}{ + "enabled": true, + }, + "gcp.gce.instance.hostname": map[string]interface{}{ + "enabled": false, + }, + "gcp.gce.instance.name": map[string]interface{}{ + "enabled": false, + }, + "host.id": map[string]interface{}{ + "enabled": true, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + "host.type": map[string]interface{}{ + "enabled": true, + }, + "k8s.cluster.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "docker_defaults", + cfg: ` + detectors = ["docker"] + docker {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"docker"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "docker_explicit", + cfg: ` + detectors = ["docker"] + docker { + resource_attributes { + host.name { enabled = true } + os.type { enabled = false } + + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"docker"}, + "timeout": 5 * time.Second, + "override": true, + "docker": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "host.name": map[string]interface{}{ + "enabled": true, + }, + "os.type": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "lambda_defaults", + cfg: ` + detectors = ["lambda"] + lambda {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"lambda"}, + 
"timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "lambda_explicit", + cfg: ` + detectors = ["lambda"] + lambda { + resource_attributes { + aws.log.group.names { enabled = true } + aws.log.stream.names { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = false } + cloud.region { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"lambda"}, + "timeout": 5 * time.Second, + "override": true, + "lambda": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "aws.log.group.names": map[string]interface{}{ + "enabled": true, + }, + "aws.log.stream.names": map[string]interface{}{ + "enabled": true, + }, + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": false, + }, + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "faas.instance": map[string]interface{}{ + "enabled": true, + }, + "faas.max_memory": map[string]interface{}{ + "enabled": true, + }, + "faas.name": map[string]interface{}{ + "enabled": true, + }, + "faas.version": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "elasticbeanstalk_defaults", + cfg: ` + detectors = ["elasticbeanstalk"] + elasticbeanstalk {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"elasticbeanstalk"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "elasticbeanstalk_explicit", + cfg: ` + detectors = ["elasticbeanstalk"] + elasticbeanstalk { + 
resource_attributes { + cloud.platform { enabled = true } + cloud.provider { enabled = true } + deployment.environment { enabled = true } + service.instance.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"elasticbeanstalk"}, + "timeout": 5 * time.Second, + "override": true, + "elasticbeanstalk": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "deployment.environment": map[string]interface{}{ + "enabled": true, + }, + "service.instance.id": map[string]interface{}{ + "enabled": false, + }, + "service.version": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "consul_defaults", + cfg: ` + detectors = ["consul"] + consul {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"consul"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "consul_explicit", + cfg: ` + detectors = ["consul"] + consul { + address = "localhost:8500" + datacenter = "dc1" + token = "secret_token" + namespace = "test_namespace" + meta = ["test"] + resource_attributes { + cloud.region { enabled = false } + host.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"consul"}, + "timeout": 5 * time.Second, + "override": true, + "consul": map[string]interface{}{ + "address": "localhost:8500", + "datacenter": "dc1", + "token": "secret_token", + "namespace": "test_namespace", + "meta": map[string]string{"test": ""}, + "resource_attributes": map[string]interface{}{ + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "host.id": map[string]interface{}{ + "enabled": false, + }, + "host.name": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": 
gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "heroku_defaults", + cfg: ` + detectors = ["heroku"] + heroku {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"heroku"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "heroku_explicit", + cfg: ` + detectors = ["heroku"] + heroku { + resource_attributes { + cloud.provider { enabled = true } + heroku.app.id { enabled = true } + heroku.dyno.id { enabled = true } + heroku.release.commit { enabled = true } + heroku.release.creation_timestamp { enabled = false } + service.instance.id { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"heroku"}, + "timeout": 5 * time.Second, + "override": true, + "heroku": map[string]interface{}{ + "resource_attributes": map[string]interface{}{ + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "heroku.app.id": map[string]interface{}{ + "enabled": true, + }, + "heroku.dyno.id": map[string]interface{}{ + "enabled": true, + }, + "heroku.release.commit": map[string]interface{}{ + "enabled": true, + }, + "heroku.release.creation_timestamp": map[string]interface{}{ + "enabled": false, + }, + "service.instance.id": map[string]interface{}{ + "enabled": false, + }, + "service.name": map[string]interface{}{ + "enabled": true, + }, + "service.version": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "kubernetes_node_defaults", + cfg: ` + detectors = ["kubernetes_node"] + kubernetes_node {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"k8snode"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + 
"docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "kubernetes_node_explicit", + cfg: ` + detectors = ["kubernetes_node"] + kubernetes_node { + auth_type = "kubeConfig" + context = "fake_ctx" + node_from_env_var = "MY_CUSTOM_VAR" + resource_attributes { + k8s.node.name { enabled = true } + k8s.node.uid { enabled = false } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"k8snode"}, + "timeout": 5 * time.Second, + "override": true, + "k8snode": map[string]interface{}{ + "auth_type": "kubeConfig", + "context": "fake_ctx", + "node_from_env_var": "MY_CUSTOM_VAR", + "resource_attributes": map[string]interface{}{ + "k8s.node.name": map[string]interface{}{ + "enabled": true, + }, + "k8s.node.uid": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + }, + }, + { + testName: "system_invalid_hostname_source", + cfg: ` + detectors = ["system"] + system { + hostname_sources = ["asdf"] + resource_attributes { } + } + output {} + `, + errorMsg: "invalid hostname source: asdf", + }, + { + testName: "system_defaults", + cfg: ` + detectors = ["system"] + system {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"system"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "system_explicit", + cfg: ` + detectors = ["system"] + system { + hostname_sources = ["cname","lookup"] + resource_attributes { + host.arch { enabled = true } + host.cpu.cache.l2.size { enabled = true } + host.cpu.family { enabled = true } + host.cpu.model.id { enabled = true } + host.cpu.model.name { enabled = true } + host.cpu.stepping { enabled = true } + host.cpu.vendor.id { enabled = false } + host.id { enabled = false } + host.name { enabled = false } + // os.description { enabled = false } + // os.type { enabled = true } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"system"}, + "timeout": 5 * time.Second, + "override": true, + "system": map[string]interface{}{ + "hostname_sources": 
[]string{"cname", "lookup"}, + "resource_attributes": map[string]interface{}{ + "host.arch": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.cache.l2.size": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.family": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.model.id": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.model.name": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.stepping": map[string]interface{}{ + "enabled": true, + }, + "host.cpu.vendor.id": map[string]interface{}{ + "enabled": false, + }, + "host.id": map[string]interface{}{ + "enabled": false, + }, + "host.name": map[string]interface{}{ + "enabled": false, + }, + "os.description": map[string]interface{}{ + "enabled": false, + }, + "os.type": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "openshift_default", + cfg: ` + detectors = ["openshift"] + openshift {} + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"openshift"}, + "timeout": 5 * time.Second, + "override": true, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(), + "lambda": lambda.DefaultArguments.Convert(), + "azure": azure.DefaultArguments.Convert(), + "aks": aks.DefaultArguments.Convert(), + "consul": consul.DefaultArguments.Convert(), + "docker": docker.DefaultArguments.Convert(), + "gcp": gcp.DefaultArguments.Convert(), + "heroku": heroku.DefaultArguments.Convert(), + "system": system.DefaultArguments.Convert(), + "openshift": openshift.DefaultArguments.Convert(), + "k8snode": kubernetes_node.DefaultArguments.Convert(), + }, + }, + { + testName: "openshift_explicit", + cfg: ` + detectors = ["openshift"] + timeout = "7s" + override = false + openshift { + address = "127.0.0.1:4444" + token = "some_token" + tls { + insecure = true + } + resource_attributes { + cloud.platform { + enabled = true + } + cloud.provider { + enabled = true + } + cloud.region { + enabled = false + } + k8s.cluster.name { + enabled = false + } + } + } + output {} + `, + expected: map[string]interface{}{ + "detectors": []string{"openshift"}, + "timeout": 7 * time.Second, + "override": false, + "openshift": map[string]interface{}{ + "address": "127.0.0.1:4444", + "token": "some_token", + "tls": map[string]interface{}{ + "insecure": true, + }, + "resource_attributes": map[string]interface{}{ + "cloud.platform": map[string]interface{}{ + "enabled": true, + }, + "cloud.provider": map[string]interface{}{ + "enabled": true, + }, + "cloud.region": map[string]interface{}{ + "enabled": false, + }, + "k8s.cluster.name": map[string]interface{}{ + "enabled": false, + }, + }, + }, + "ec2": ec2.DefaultArguments.Convert(), + "ecs": ecs.DefaultArguments.Convert(), + "eks": eks.DefaultArguments.Convert(), + "elasticbeanstalk": 
elasticbeanstalk.DefaultArguments.Convert(),
+				"lambda":  lambda.DefaultArguments.Convert(),
+				"azure":   azure.DefaultArguments.Convert(),
+				"aks":     aks.DefaultArguments.Convert(),
+				"consul":  consul.DefaultArguments.Convert(),
+				"docker":  docker.DefaultArguments.Convert(),
+				"gcp":     gcp.DefaultArguments.Convert(),
+				"heroku":  heroku.DefaultArguments.Convert(),
+				"system":  system.DefaultArguments.Convert(),
+				"k8snode": kubernetes_node.DefaultArguments.Convert(),
+			},
+		},
+		{
+			testName: "env",
+			cfg: `
+			detectors = ["env"]
+			timeout = "7s"
+			override = false
+			output {}
+			`,
+			expected: map[string]interface{}{
+				"detectors":        []string{"env"},
+				"timeout":          7 * time.Second,
+				"override":         false,
+				"ec2":              ec2.DefaultArguments.Convert(),
+				"ecs":              ecs.DefaultArguments.Convert(),
+				"eks":              eks.DefaultArguments.Convert(),
+				"elasticbeanstalk": elasticbeanstalk.DefaultArguments.Convert(),
+				"lambda":           lambda.DefaultArguments.Convert(),
+				"azure":            azure.DefaultArguments.Convert(),
+				"aks":              aks.DefaultArguments.Convert(),
+				"consul":           consul.DefaultArguments.Convert(),
+				"docker":           docker.DefaultArguments.Convert(),
+				"gcp":              gcp.DefaultArguments.Convert(),
+				"heroku":           heroku.DefaultArguments.Convert(),
+				"system":           system.DefaultArguments.Convert(),
+				"openshift":        openshift.DefaultArguments.Convert(),
+				"k8snode":          kubernetes_node.DefaultArguments.Convert(),
+			},
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.testName, func(t *testing.T) {
+			var args resourcedetection.Arguments
+			err := river.Unmarshal([]byte(tc.cfg), &args)
+			if tc.errorMsg != "" {
+				require.ErrorContains(t, err, tc.errorMsg)
+				return
+			}
+
+			require.NoError(t, err)
+
+			actualPtr, err := args.Convert()
+			require.NoError(t, err)
+
+			actual := actualPtr.(*resourcedetectionprocessor.Config)
+
+			var expected resourcedetectionprocessor.Config
+			err = mapstructure.Decode(tc.expected, &expected)
+			require.NoError(t, err)
+
+			require.Equal(t, expected, *actual)
+		})
+	}
+}
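The harness above expresses every expectation as a plain `map[string]interface{}` and relies on `mapstructure` to lift it into the upstream `resourcedetectionprocessor.Config` before the final `require.Equal`. A self-contained sketch of that decode step, using an illustrative stand-in struct rather than the real upstream config:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// DetectorConfig is an illustrative stand-in; the real
// resourcedetectionprocessor.Config has many more nested fields.
type DetectorConfig struct {
	Detectors []string `mapstructure:"detectors"`
	Override  bool     `mapstructure:"override"`
}

func main() {
	fixture := map[string]interface{}{
		"detectors": []string{"gcp"},
		"override":  true,
	}

	var cfg DetectorConfig
	// mapstructure.Decode walks the map and fills the matching struct
	// fields, which is what lets the tests state expectations as maps.
	if err := mapstructure.Decode(fixture, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```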
diff --git a/component/pyroscope/ebpf/args.go b/component/pyroscope/ebpf/args.go
index facf9129d6ba..c4c444b917f2 100644
--- a/component/pyroscope/ebpf/args.go
+++ b/component/pyroscope/ebpf/args.go
@@ -10,8 +10,6 @@ import (
 type Arguments struct {
 	ForwardTo       []pyroscope.Appendable `river:"forward_to,attr"`
 	Targets         []discovery.Target     `river:"targets,attr,optional"`
-	DefaultTarget   discovery.Target       `river:"default_target,attr,optional"` // undocumented, keeping it until we have other sd
-	TargetsOnly     bool                   `river:"targets_only,attr,optional"`   // undocumented, keeping it until we have other sd
 	CollectInterval time.Duration          `river:"collect_interval,attr,optional"`
 	SampleRate      int                    `river:"sample_rate,attr,optional"`
 	PidCacheSize    int                    `river:"pid_cache_size,attr,optional"`
diff --git a/component/pyroscope/ebpf/ebpf_linux.go b/component/pyroscope/ebpf/ebpf_linux.go
index b8b1afbecf59..8d201ac488f1 100644
--- a/component/pyroscope/ebpf/ebpf_linux.go
+++ b/component/pyroscope/ebpf/ebpf_linux.go
@@ -82,7 +82,6 @@ func defaultArguments() Arguments {
 		CacheRounds:          3,
 		CollectUserProfile:   true,
 		CollectKernelProfile: true,
-		TargetsOnly:          true,
 		Demangle:             "none",
 		PythonEnabled:        true,
 	}
@@ -226,8 +225,7 @@ func targetsOptionFromArgs(args Arguments) sd.TargetsOptions {
 	}
 	return sd.TargetsOptions{
 		Targets:            targets,
-		DefaultTarget:      sd.DiscoveryTarget(args.DefaultTarget),
-		TargetsOnly:        args.TargetsOnly,
+		TargetsOnly:        true,
 		ContainerCacheSize: args.ContainerIDCacheSize,
 	}
 }
diff --git a/component/pyroscope/java/asprof/asprof_linux_amd64.go b/component/pyroscope/java/asprof/asprof_linux_amd64.go
index 6b7f0a6f74ca..7d405539cda6 100644
--- a/component/pyroscope/java/asprof/asprof_linux_amd64.go
+++ b/component/pyroscope/java/asprof/asprof_linux_amd64.go
@@ -6,7 +6,7 @@ import (
 	_ "embed"
 )
 
-//go:embed async-profiler-3.0-ea-linux-x64.tar.gz
+//go:embed async-profiler-3.0-linux-x64.tar.gz
 var embededArchiveData []byte
 
 // asprof
diff --git a/component/pyroscope/java/asprof/asprof_linux_arm64.go b/component/pyroscope/java/asprof/asprof_linux_arm64.go
index ce55bdb7ffe8..e6978f02b995 100644
--- a/component/pyroscope/java/asprof/asprof_linux_arm64.go
+++ b/component/pyroscope/java/asprof/asprof_linux_arm64.go
@@ -6,7 +6,7 @@ import (
 	_ "embed"
 )
 
-//go:embed async-profiler-3.0-ea-linux-arm64.tar.gz
+//go:embed async-profiler-3.0-linux-arm64.tar.gz
 var embededArchiveData []byte
 
 // asprof
diff --git a/component/pyroscope/java/asprof/async-profiler-3.0-ea-linux-arm64.tar.gz b/component/pyroscope/java/asprof/async-profiler-3.0-ea-linux-arm64.tar.gz
deleted file mode 100644
index 425600954162..000000000000
Binary files a/component/pyroscope/java/asprof/async-profiler-3.0-ea-linux-arm64.tar.gz and /dev/null differ
diff --git a/component/pyroscope/java/asprof/async-profiler-3.0-ea-linux-x64.tar.gz b/component/pyroscope/java/asprof/async-profiler-3.0-ea-linux-x64.tar.gz
deleted file mode 100644
index a9b70fdf87d9..000000000000
Binary files a/component/pyroscope/java/asprof/async-profiler-3.0-ea-linux-x64.tar.gz and /dev/null differ
diff --git a/component/pyroscope/java/asprof/async-profiler-3.0-linux-arm64.tar.gz b/component/pyroscope/java/asprof/async-profiler-3.0-linux-arm64.tar.gz
new file mode 100644
index 000000000000..fcab1a963d7a
Binary files /dev/null and b/component/pyroscope/java/asprof/async-profiler-3.0-linux-arm64.tar.gz differ
diff --git a/component/pyroscope/java/asprof/async-profiler-3.0-linux-x64.tar.gz b/component/pyroscope/java/asprof/async-profiler-3.0-linux-x64.tar.gz
new file mode 100644
index 000000000000..c4386b482792
Binary files /dev/null and b/component/pyroscope/java/asprof/async-profiler-3.0-linux-x64.tar.gz differ
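The `//go:embed` directives above are what ship the async-profiler archives inside the agent binary, so moving from the `3.0-ea` preview to the `3.0` release only swaps the embedded tarball. A minimal sketch of the pattern, with a placeholder file name:

```go
package main

import (
	_ "embed"
	"fmt"
)

// go:embed must name a file that exists at build time;
// "archive.tar.gz" is a placeholder for the real tarball.
//
//go:embed archive.tar.gz
var archiveData []byte

func main() {
	// The archive travels inside the binary and can be unpacked at runtime.
	fmt.Printf("embedded %d bytes\n", len(archiveData))
}
```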
diff --git a/component/pyroscope/java/loop.go b/component/pyroscope/java/loop.go
index aee4b8554770..918e97751563 100644
--- a/component/pyroscope/java/loop.go
+++ b/component/pyroscope/java/loop.go
@@ -152,6 +152,10 @@ func (p *profilingLoop) push(jfrBytes []byte, startTime time.Time, endTime time.
 		for _, l := range jfrpprofPyroscope.Labels(target, profiles.JFREvent, req.Metric, "", spyName) {
 			ls.Set(l.Name, l.Value)
 		}
+		if ls.Get(labelServiceName) == "" {
+			ls.Set(labelServiceName, inferServiceName(target))
+		}
+
 		profile, err := req.Profile.MarshalVT()
 		if err != nil {
 			_ = l.Log("msg", "failed to marshal profile", "err", err)
diff --git a/component/pyroscope/java/target.go b/component/pyroscope/java/target.go
new file mode 100644
index 000000000000..25a1defebd54
--- /dev/null
+++ b/component/pyroscope/java/target.go
@@ -0,0 +1,35 @@
+package java
+
+import (
+	"fmt"
+
+	"github.com/grafana/agent/component/discovery"
+)
+
+const (
+	labelServiceName    = "service_name"
+	labelServiceNameK8s = "__meta_kubernetes_pod_annotation_pyroscope_io_service_name"
+)
+
+func inferServiceName(target discovery.Target) string {
+	k8sServiceName := target[labelServiceNameK8s]
+	if k8sServiceName != "" {
+		return k8sServiceName
+	}
+	k8sNamespace := target["__meta_kubernetes_namespace"]
+	k8sContainer := target["__meta_kubernetes_pod_container_name"]
+	if k8sNamespace != "" && k8sContainer != "" {
+		return fmt.Sprintf("java/%s/%s", k8sNamespace, k8sContainer)
+	}
+	dockerContainer := target["__meta_docker_container_name"]
+	if dockerContainer != "" {
+		return dockerContainer
+	}
+	if swarmService := target["__meta_dockerswarm_container_label_service_name"]; swarmService != "" {
+		return swarmService
+	}
+	if swarmService := target["__meta_dockerswarm_service_name"]; swarmService != "" {
+		return swarmService
+	}
+	return "unspecified"
+}
diff --git a/component/pyroscope/scrape/target.go b/component/pyroscope/scrape/target.go
index 703a93dd63be..736d75b43f78 100644
--- a/component/pyroscope/scrape/target.go
+++ b/component/pyroscope/scrape/target.go
@@ -430,5 +430,11 @@ func inferServiceName(lset labels.Labels) string {
 	if dockerContainer != "" {
 		return dockerContainer
 	}
+	if swarmService := lset.Get("__meta_dockerswarm_container_label_service_name"); swarmService != "" {
+		return swarmService
+	}
+	if swarmService := lset.Get("__meta_dockerswarm_service_name"); swarmService != "" {
+		return swarmService
+	}
 	return "unspecified"
 }
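Both copies of `inferServiceName` now share the same fallback chain: the `pyroscope.io/service_name` annotation first, then a synthesized `java/<namespace>/<container>` name, then the Docker container name, then the two Swarm labels, and finally `"unspecified"`. A table-driven sketch of how the new `java` variant could be exercised, assuming `discovery.Target` is a plain map of label names to values; the test cases are illustrative, not part of the change:

```go
package java

import (
	"testing"

	"github.com/grafana/agent/component/discovery"
)

func TestInferServiceNameFallbacks(t *testing.T) {
	cases := []struct {
		name   string
		target discovery.Target
		want   string
	}{
		// The explicit pyroscope.io/service_name annotation always wins.
		{"k8s annotation", discovery.Target{labelServiceNameK8s: "checkout"}, "checkout"},
		// Namespace + container fall back to java/<namespace>/<container>.
		{"k8s namespace and container", discovery.Target{
			"__meta_kubernetes_namespace":          "prod",
			"__meta_kubernetes_pod_container_name": "billing",
		}, "java/prod/billing"},
		// Plain Docker targets use the container name.
		{"docker container", discovery.Target{"__meta_docker_container_name": "redis"}, "redis"},
		// The new Swarm labels are consulted before giving up.
		{"swarm service", discovery.Target{"__meta_dockerswarm_service_name": "frontend"}, "frontend"},
		// No usable labels at all.
		{"empty target", discovery.Target{}, "unspecified"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			if got := inferServiceName(tc.target); got != tc.want {
				t.Fatalf("inferServiceName() = %q, want %q", got, tc.want)
			}
		})
	}
}
```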
diff --git a/converter/internal/staticconvert/internal/build/builder.go b/converter/internal/staticconvert/internal/build/builder.go
index 58fedf6225c2..dadc4ae3fd96 100644
--- a/converter/internal/staticconvert/internal/build/builder.go
+++ b/converter/internal/staticconvert/internal/build/builder.go
@@ -204,6 +204,10 @@ func (b *IntegrationsConfigBuilder) appendExporter(commonConfig *int_config.Comm
 		RemoteWriteConfigs: b.cfg.Integrations.ConfigV1.PrometheusRemoteWrite,
 	}
 
+	if len(b.cfg.Integrations.ConfigV1.PrometheusRemoteWrite) == 0 {
+		b.diags.Add(diag.SeverityLevelError, "The converter does not support handling integrations which are not connected to a remote_write.")
+	}
+
 	jobNameToCompLabelsFunc := func(jobName string) string {
 		return b.jobNameToCompLabel(jobName)
 	}
diff --git a/converter/internal/staticconvert/testdata/integrations_no_rw.diags b/converter/internal/staticconvert/testdata/integrations_no_rw.diags
new file mode 100644
index 000000000000..1f0d463ede34
--- /dev/null
+++ b/converter/internal/staticconvert/testdata/integrations_no_rw.diags
@@ -0,0 +1,2 @@
+(Error) The converter does not support handling integrations which are not connected to a remote_write.
+(Warning) Please review your agent command line flags and ensure they are set in your Flow mode config file where necessary.
\ No newline at end of file
diff --git a/converter/internal/staticconvert/testdata/integrations_no_rw.yaml b/converter/internal/staticconvert/testdata/integrations_no_rw.yaml
new file mode 100644
index 000000000000..76e4848e56b5
--- /dev/null
+++ b/converter/internal/staticconvert/testdata/integrations_no_rw.yaml
@@ -0,0 +1,4 @@
+integrations:
+  node_exporter:
+    scrape_integration: true
+    enabled: true
\ No newline at end of file
diff --git a/docs/generator/links_to_types.go b/docs/generator/links_to_types.go
index 867654e1648d..8de89bfd1321 100644
--- a/docs/generator/links_to_types.go
+++ b/docs/generator/links_to_types.go
@@ -38,12 +38,10 @@ func (l *LinksToTypesGenerator) Generate() (string, error) {
 	}
 
 	note := `
-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
 `
 
 	return heading + acceptingSection + outputSection + note, nil
diff --git a/docs/make-docs b/docs/make-docs
index 25176a37f051..d5d861ca83b4 100755
--- a/docs/make-docs
+++ b/docs/make-docs
@@ -6,7 +6,13 @@
 # [Semantic versioning](https://semver.org/) is used to help the reader identify the significance of changes.
 # Changes are relevant to this script and the support docs.mk GNU Make interface.
 #
-
+# ## 5.2.0 (2024-01-18)
+#
+# ### Changed
+#
+# - Updated `make vale` to use latest Vale style and configuration.
+# - Updated `make vale` to use platform appropriate image.
+#
 # ## 5.1.2 (2023-11-08)
 #
 # ### Added
@@ -704,14 +710,14 @@ case "${image}" in
       "${PODMAN}" run \
         --init \
         --interactive \
-        --platform linux/amd64 \
         --rm \
+        --workdir /etc/vale \
        --tty \
         ${volumes} \
         "${DOCS_IMAGE}" \
         "--minAlertLevel=${VALE_MINALERTLEVEL}" \
-        --config=/etc/vale/.vale.ini \
-        --output=line \
+        '--glob=*.md' \
+        --output=/etc/vale/rdjsonl.tmpl \
         /hugo/content/docs | sed "s#$(proj_dst "${proj}")#sources#"
     ;;
   *)
diff --git a/docs/rfcs/0000-template.md b/docs/rfcs/0000-template.md
index bbc01019c3ab..c565ea04e584 100644
--- a/docs/rfcs/0000-template.md
+++ b/docs/rfcs/0000-template.md
@@ -3,4 +3,3 @@
 * Date: YYYY-MM-DD
 * Author: Full Name (@github_username)
 * PR: [grafana/agent#XXXX](https://github.com/grafana/agent/pull/XXXX)
-* Status: Draft
diff --git a/docs/rfcs/0001-designing-in-the-open.md b/docs/rfcs/0001-designing-in-the-open.md
index 8f73f5d7a8d2..7419b060a375 100644
--- a/docs/rfcs/0001-designing-in-the-open.md
+++ b/docs/rfcs/0001-designing-in-the-open.md
@@ -3,7 +3,6 @@
 * Date: 2021-11-02
 * Author: Robert Fratto (@rfratto)
 * PR: [grafana/agent#1055](https://github.com/grafana/agent/pull/1055)
-* Status: Implemented
 
 ## Summary
 
diff --git a/docs/rfcs/0002-integrations-in-operator.md b/docs/rfcs/0002-integrations-in-operator.md
index 8003606d3d46..ed54d40de6bd 100644
--- a/docs/rfcs/0002-integrations-in-operator.md
+++ b/docs/rfcs/0002-integrations-in-operator.md
@@ -3,7 +3,6 @@
 * Date: 2022-01-04
 * Author: Robert Fratto (@rfratto)
 * PR: [grafana/agent#1224](https://github.com/grafana/agent/pull/1224)
-* Status: Draft
 
 ## Background
 
diff --git a/docs/rfcs/0003-new-metrics-subsystem.md b/docs/rfcs/0003-new-metrics-subsystem.md
index 961cd983e14a..336c0e4cc475 100644
--- a/docs/rfcs/0003-new-metrics-subsystem.md
+++ b/docs/rfcs/0003-new-metrics-subsystem.md
@@ -3,7 +3,7 @@
 * Date: 2021-11-29
 * Author: Robert Fratto (@rfratto)
 * PR: [grafana/agent#1140](https://github.com/grafana/agent/pull/1140)
-* Status: Draft
+* Status: Abandoned
 
 ## Background
 
diff --git a/docs/rfcs/0004-agent-flow.md b/docs/rfcs/0004-agent-flow.md
index db061af7a16b..3c1052e926ad 100644
--- a/docs/rfcs/0004-agent-flow.md
+++ b/docs/rfcs/0004-agent-flow.md
@@ -1,14 +1,13 @@
-# This provided the basis for Agent Flow, and though not all the concepts/ideas will make it into flow, it is good to have the historical context for why we started down this path. 
+# This provided the basis for Agent Flow, and though not all the concepts/ideas will make it into flow, it is good to have the historical context for why we started down this path.
 
-# Agent Flow - Agent Utilizing Components 
+# Agent Flow - Agent Utilizing Components
 
 * Date: 2022-03-30
 * Author: Matt Durham (@mattdurham)
-* PRs: 
-  * [grafana/agent#1538](https://github.com/grafana/agent/pull/1538) - Problem Statement 
+* PRs:
+  * [grafana/agent#1538](https://github.com/grafana/agent/pull/1538) - Problem Statement
   * [grafana/agent#1546](https://github.com/grafana/agent/pull/1546) - Messages and Expressions
-* Status: Draft
 
 ## Overarching Problem Statement
 
@@ -17,7 +16,7 @@ The Agents configuration and onboarding is difficult to use. Viewing the effect
 
 ## Description
 
-Agent Flow is intended to solve real world needs that the Grafana Agent team have identified in conversations with users and developers. 
+Agent Flow is intended to solve real world needs that the Grafana Agent team have identified in conversations with users and developers.
These broadly include: @@ -32,13 +31,13 @@ These broadly include: - Lack of understanding how telemetry data moves through agent - Other systems use pipeline/extensions to allow users to understand how data moves through the system -# 1. Introduction and Goals +# 1. Introduction and Goals -This design document outlines Agent Flow, a system for describing a programmable pipeline for telemetry data. +This design document outlines Agent Flow, a system for describing a programmable pipeline for telemetry data. Agent Flow refers to both the execution, configuration and visual configurator of data flow. -### Goals +### Goals * Allow users to more easily understand the impact of their configuration * Allow users to collect integration metrics across a set of agents @@ -55,43 +54,43 @@ Agent Flow refers to both the execution, configuration and visual configurator o At a high level, Agent Flow: -* Breaks apart the existing hierarchical configuration file into reusable components +* Breaks apart the existing hierarchical configuration file into reusable components * Allows components to be connected, resulting in a programmable pipeline of telemetry data -This document considers three potential approaches to allow users to connect components together: +This document considers three potential approaches to allow users to connect components together: -1. Message passing (i.e., an actor model) +1. Message passing (i.e., an actor model) 2. Expressions (i.e., directly referencing the output of another component) -3. A hybrid of both messages and expressions +3. A hybrid of both messages and expressions -The Flow Should in general resemble a flowchart or node graph. The data flow diagram would conceptually look like the below, with each node being composable and connecting with other nodes. +The Flow Should in general resemble a flowchart or node graph. The data flow diagram would conceptually look like the below, with each node being composable and connecting with other nodes. 
``` -┌─────────────────────────┐ ┌──────────────────┐ ┌─────────────────────┐ ┌───────────────────┐ -│ │ ┌─────▶│ Target Filter │─────────▶│ Redis Integration │──────▶│ Metric Filter │──┐ -│ │ │ └──────────────────┘ └─────────────────────┘ └───────────────────┘ │ -│ Service Discovery │──────┤ │ -│ │ │ │ -│ │ │ │ -└─────────────────────────┘ │ ┌─────────────────┐ ┌──────────────────────┐ ┌────────┘ - ├─────▶│ Target Filter │──────────▶│ MySQL Integrations │───────────┐ │ - │ └─────────────────┘ └──────────────────────┘ │ │ - │ │ │ - │ ┌─────────────────┐ ┌─────────────┐ │ │ +┌─────────────────────────┐ ┌──────────────────┐ ┌─────────────────────┐ ┌───────────────────┐ +│ │ ┌─────▶│ Target Filter │─────────▶│ Redis Integration │──────▶│ Metric Filter │──┐ +│ │ │ └──────────────────┘ └─────────────────────┘ └───────────────────┘ │ +│ Service Discovery │──────┤ │ +│ │ │ │ +│ │ │ │ +└─────────────────────────┘ │ ┌─────────────────┐ ┌──────────────────────┐ ┌────────┘ + ├─────▶│ Target Filter │──────────▶│ MySQL Integrations │───────────┐ │ + │ └─────────────────┘ └──────────────────────┘ │ │ + │ │ │ + │ ┌─────────────────┐ ┌─────────────┐ │ │ └──────▶│ Target Filter │─────────────▶│ Scraper │─────────────┐ │ │ ┌────────────────┐ └─────────────────┘ └─────────────┘ └──┴┬───────┴─▶│ Remote Write │ │ └────────────────┘ - │ - │ -┌──────────────────────────┐ │ -│ Remote Write Receiver │─────┐ ┌───────────────────────┐ │ -└──────────────────────────┘ │ ┌────▶│ Metric Transformer │─────────┘ - │ │ └───────────────────────┘ - │ │ -┌─────────────────────────┐ │ ┌────────────────────┐ │ -│ HTTP Receiver │──────┴─────▶│ Metric Filter │────┘ ┌──────────────────────────────────┐ -└─────────────────────────┘ └────────────────────┘ │ Global and Server Settings │ - └──────────────────────────────────┘ + │ + │ +┌──────────────────────────┐ │ +│ Remote Write Receiver │─────┐ ┌───────────────────────┐ │ +└──────────────────────────┘ │ ┌────▶│ Metric Transformer │─────────┘ + │ │ └───────────────────────┘ + │ │ +┌─────────────────────────┐ │ ┌────────────────────┐ │ +│ HTTP Receiver │──────┴─────▶│ Metric Filter │────┘ ┌──────────────────────────────────┐ +└─────────────────────────┘ └────────────────────┘ │ Global and Server Settings │ + └──────────────────────────────────┘ ``` **Note: Consider all examples pseudoconfig** @@ -107,14 +106,14 @@ Expression based is writing expressions that allow referencing other components **Cons** * Harder for users to wire things together - * References to components are more complex, which may be harder to understand + * References to components are more complex, which may be harder to understand * Harder to build a GUI for * Every field of a component is potentially dynamic, making it harder to represent visually ## 2.2 Message Based -Message based is where components have no knowledge of other components and information is passed strictly via input and output streams. +Message based is where components have no knowledge of other components and information is passed strictly via input and output streams. 
**Pros** @@ -122,7 +121,7 @@ Message based is where components have no knowledge of other components and info * Easier to build a GUI for * Inputs and Outputs are well defined and less granular * Connections are made by connecting two components directly, compared to expressions which connect subsets of a component's output -* References between components are no more than strings, making the text-based representation language agnostic (e.g., it could be YAML, JSON, or any language) +* References between components are no more than strings, making the text-based representation language agnostic (e.g., it could be YAML, JSON, or any language) **Cons** @@ -130,16 +129,16 @@ Message based is where components have no knowledge of other components and info * Larger type system needed * More structured to keep the amount of types down -Messages require a more rigid type structure to minimize the number of total components. +Messages require a more rigid type structure to minimize the number of total components. For example, it would be preferable to have a single `Credential` type that can be emitted by an s3, Vault, or Consul component. These components would then need to set a field that marks their output as a specific kind of Credential (such as Basic Auth or Bearer Auth). If, instead, you had multiple Credential types, like `MySQLCredentials` and `RedisCredentials`, you would have the following components: -* Vault component for MySQL credentials -* Vault component for Redis credentials -* S3 component for MySQL credentials -* S3 component for Redis credentials +* Vault component for MySQL credentials +* Vault component for Redis credentials +* S3 component for MySQL credentials +* S3 component for Redis credentials * (and so on) ## 2.3 Hybrid @@ -157,10 +156,10 @@ discovery "mysql_pods" { integration "mysql" { - # Create one mysql integration for every element in the array here + # Create one mysql integration for every element in the array here for_each = discovery.mysql_pods.targets - # Each spawned mysql integration has its data_source_name derived from + # Each spawned mysql integration has its data_source_name derived from # the address label of the input target. data_source_name = "root@(${each.labels["__address__"]})" } diff --git a/docs/rfcs/0005-river.md b/docs/rfcs/0005-river.md index 8f4e3e12299b..3fa82a5f7eb2 100644 --- a/docs/rfcs/0005-river.md +++ b/docs/rfcs/0005-river.md @@ -3,7 +3,6 @@ * Date: 2022-06-27 * Author: Robert Fratto (@rfratto), Matt Durham (@mattdurham) * PR: [grafana/agent#1839](https://github.com/grafana/agent/pull/1839) -* Status: Draft ## Summary diff --git a/docs/rfcs/0006-clustering.md b/docs/rfcs/0006-clustering.md index b6c08b2bc210..b29070410e47 100644 --- a/docs/rfcs/0006-clustering.md +++ b/docs/rfcs/0006-clustering.md @@ -3,7 +3,6 @@ * Date: 2023-03-02 * Author: Paschalis Tsilias (@tpaschalis) * PR: [grafana/agent#3151](https://github.com/grafana/agent/pull/3151) -* Status: Draft ## Summary - Background We routinely run agents with 1-10 million active series; we regularly see @@ -98,7 +97,7 @@ presented in the next section. ## Use cases In the first iteration of agent clustering, we would like to start with the following use-cases. These two are distinct in the way that they make use of -scheduling. +scheduling. The first one makes sure that we have a way of notifying components of cluster changes and calling their Update method and continuously re-evaluate ownership @@ -112,9 +111,9 @@ it is scraping/reading logs from. 
 Components that use the Flow concept of a “target” as their Arguments should
 be able to distribute the target load between themselves. To do that we can
 introduce a layer of abstraction over the Targets definition that can interact
 with the Sharder provided by the
-clusterer and provide a simple API, for example: 
+clusterer and provide a simple API, for example:
 ```go
-type Targets interface { 
+type Targets interface {
 	Get() []Target
 }
 ```
@@ -136,9 +135,9 @@ I propose that we start with the following set of components that make use
 of this functionality: prometheus.scrape, loki.source.file,
 loki.source.kubernetes, and pyroscope.scrape.
 
-Here’s how the configuration for a component could look like: 
+Here’s how the configuration for a component could look like:
 ```river
-prometheus.scrape "pods" { 
+prometheus.scrape "pods" {
   clustering {
     node_updates = true
   }
@@ -200,7 +199,7 @@ information.
 
 On a more practical note, we’ll have to choose how components might use to
 opt-in to the component scheduling.
 
-For example, we could implement either: 
+For example, we could implement either:
 * Implicitly adding a new Argument block that is implicitly present by default on _all_ components:
   ```
diff --git a/docs/rfcs/0006-future-of-agent-operator.md b/docs/rfcs/0006-future-of-agent-operator.md
index e0ed4bef9304..3a5c3d2e5611 100644
--- a/docs/rfcs/0006-future-of-agent-operator.md
+++ b/docs/rfcs/0006-future-of-agent-operator.md
@@ -3,7 +3,6 @@
 * Date: 2022-08-17
 * Author: Craig Peterson (@captncraig)
 * PR: [grafana/agent#2046](https://github.com/grafana/agent/pull/2046)
-* Status: Draft
 
 ## Summary
 
@@ -31,6 +30,6 @@ The operator is a fairly complex piece of code, and has been slower than some ot
 
 ## Beta status
 
-The Grafana Agent Operator is still considered beta software. It has received a better reception than anticipated, and is now an important part of the Agent project. We are committed to supporting the Operator into the future, but are going to leave the beta designation in place while making larger refactorings as described above. We make efforts to avoid breaking changes, and hope that custom resource definitions will remain compatible, but it is possible some changes will be necessary. We will make every effort to justify and communicate such scenarios as they arise. 
+The Grafana Agent Operator is still considered beta software. It has received a better reception than anticipated, and is now an important part of the Agent project. We are committed to supporting the Operator into the future, but are going to leave the beta designation in place while making larger refactorings as described above. We make efforts to avoid breaking changes, and hope that custom resource definitions will remain compatible, but it is possible some changes will be necessary. We will make every effort to justify and communicate such scenarios as they arise.
 
 Once we are confident we have an Operator we are happy with and that the resource definitions are stable, we will revisit the beta status as soon as we can.
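The `Targets` abstraction in the clustering RFC above is the piece that lets a component ask only for the targets it owns. A minimal sketch of how such a wrapper could sit on top of a sharder; the `Sharder` interface shape is assumed here, since the RFC leaves its definition to the clusterer:

```go
package cluster

// Target mirrors the Flow notion of a discovery target: a set of labels.
type Target map[string]string

// Sharder is a hypothetical stand-in for whatever ownership API the
// clusterer ends up exposing.
type Sharder interface {
	// Owns reports whether the local node is responsible for the key.
	Owns(key string) bool
}

// Targets is the simple API proposed by the RFC.
type Targets interface {
	Get() []Target
}

type shardedTargets struct {
	all     []Target
	sharder Sharder
}

// Get returns only the targets owned by the local node, so components such
// as prometheus.scrape divide work without coordinating with each other
// directly.
func (s *shardedTargets) Get() []Target {
	owned := make([]Target, 0, len(s.all))
	for _, t := range s.all {
		if s.sharder.Owns(t["__address__"]) {
			owned = append(owned, t)
		}
	}
	return owned
}
```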
diff --git a/docs/rfcs/0007-flow-modules.md b/docs/rfcs/0007-flow-modules.md
index f08fb74c0f3b..5058663dd3ba 100644
--- a/docs/rfcs/0007-flow-modules.md
+++ b/docs/rfcs/0007-flow-modules.md
@@ -3,7 +3,6 @@
 * Date: 2023-01-27
 * Author: Matt Durham @mattdurham
 * PR: [grafana/agent#2898](https://github.com/grafana/agent/pull/2898)
-* Status: Draft
 
 [Formatted Link for ease of user](https://github.com/grafana/agent/blob/rfc_modules/docs/rfcs/0007-flow-modules.md)
 
@@ -30,7 +29,7 @@ During this time the Agent team saw a lot of potential in the idea of "modules."
 
 ### Enable re-use of common patterns
 
-Common functionality can be wrapped in a set of common components that form a module. These shared modules can then be used instead of reinventing use cases. 
+Common functionality can be wrapped in a set of common components that form a module. These shared modules can then be used instead of reinventing use cases.
 
 ### Allow loading a module from a string
 
@@ -42,11 +41,11 @@ Modules will be able to load other modules, with reasonable safe guards. There w
 
 ### Modules should be sandboxed except via arguments and exports
 
-Modules cannot directly access children or parent modules except through predefined arguments and exports. 
+Modules cannot directly access children or parent modules except through predefined arguments and exports.
 
 ## Non Goals
 
-Non goals represent capabilities that are not going to be done in the initial release of modules but may come in later versions. 
+Non goals represent capabilities that are not going to be done in the initial release of modules but may come in later versions.
 
 * Add additional capabilities to load strings
 * Any type of versioning
@@ -66,7 +65,7 @@ Modules will not contain any sort of versioning nor will check for compatibility
 
 ### Any user interface work beyond ensuring it works as the UI currently does
 
-Users will not be able to drill into modules, they will be represented as any other normal component. 
+Users will not be able to drill into modules, they will be represented as any other normal component.
 
 ## Example
 
@@ -122,7 +121,7 @@ prometheus.scrape "scraper" {
 * A module cannot directly or indirectly load itself, this will not be enforced by the system
 * Singleton components are not supported at this time. Example [node_exporter](https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.integration.node_exporter/).
 * Modules will not prevent competing resources, such as starting a server on the same port
-* [Configuration blocks](https://grafana.com/docs/agent/latest/flow/reference/config-blocks/#configuration-blocks) will not be supported. 
+* [Configuration blocks](https://grafana.com/docs/agent/latest/flow/reference/config-blocks/#configuration-blocks) will not be supported.
 * Names of arguments and exports within a module must be unique across that module.
 
 ## Proposal
diff --git a/docs/rfcs/0008-backwards-compatibility.md b/docs/rfcs/0008-backwards-compatibility.md
index 147490f41e40..56d4bac647a3 100644
--- a/docs/rfcs/0008-backwards-compatibility.md
+++ b/docs/rfcs/0008-backwards-compatibility.md
@@ -3,7 +3,6 @@
 * Date: 2023-05-25
 * Author: Robert Fratto (@rfratto)
 * PR: [grafana/agent#3981](https://github.com/grafana/agent/pull/3981)
-* Status: Draft
 
 Grafana Agent has been following [semantic versioning](https://semver.org/)
 since its inception. After three years of development and 33 minor releases,
 the project is on trajectory to have a 1.0 release.
diff --git a/docs/sources/_index.md b/docs/sources/_index.md
index 780a3800da31..a902be317bab 100644
--- a/docs/sources/_index.md
+++ b/docs/sources/_index.md
@@ -9,7 +9,7 @@ title: Grafana Agent
 description: Grafana Agent is a flexible, performant, vendor-neutral, telemetry collector
 weight: 350
 cascade:
-  AGENT_RELEASE: v0.39.0
+  AGENT_RELEASE: v0.39.2
   OTEL_VERSION: v0.87.0
 ---
 
@@ -24,11 +24,11 @@ Grafana Agent is based around **components**. Components are wired together to
 form programmable observability **pipelines** for telemetry collection,
 processing, and delivery.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent.
 
 For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}).
-{{% /admonition %}}
+{{< /admonition >}}
 
 Grafana Agent can collect, transform, and send data to:
diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t
index 549ba33ef8db..daf939a62ac3 100644
--- a/docs/sources/_index.md.t
+++ b/docs/sources/_index.md.t
@@ -24,11 +24,11 @@ Grafana Agent is based around **components**. Components are wired together to
 form programmable observability **pipelines** for telemetry collection,
 processing, and delivery.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This page focuses mainly on [Flow mode](https://grafana.com/docs/agent//flow/), the Terraform-inspired variant of Grafana Agent.
 
 For information on other variants of Grafana Agent, refer to [Introduction to Grafana Agent]({{< relref "./about.md" >}}).
-{{% /admonition %}}
+{{< /admonition >}}
 
 Grafana Agent can collect, transform, and send data to:
diff --git a/docs/sources/about.md b/docs/sources/about.md
index 57468c7f3e24..eca262408d7d 100644
--- a/docs/sources/about.md
+++ b/docs/sources/about.md
@@ -105,10 +105,10 @@ You should run Static mode when:
 
 ### Static mode Kubernetes operator
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Grafana Agent version 0.37 and newer provides Prometheus Operator compatibility in Flow mode.
 
 You should use Grafana Agent Flow mode for all new Grafana Agent deployments.
-{{% /admonition %}}
+{{< /admonition >}}
 
 The [Static mode Kubernetes operator][] is a variant of Grafana Agent introduced on June 17, 2021. It's currently in beta.
diff --git a/docs/sources/flow/_index.md b/docs/sources/flow/_index.md
index 1b95fbe29ae8..cc800508f222 100644
--- a/docs/sources/flow/_index.md
+++ b/docs/sources/flow/_index.md
@@ -68,13 +68,13 @@ prometheus.remote_write "default" {
 }
 ```
 
-## {{< param "PRODUCT_NAME" >}} configuration generator
+## {{% param "PRODUCT_NAME" %}} configuration generator
 
 The {{< param "PRODUCT_NAME" >}} [configuration generator](https://grafana.github.io/agent-configurator/) will help you get a head start on creating flow code.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This feature is experimental, and it doesn't support all River components.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Next steps
 
diff --git a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md b/docs/sources/flow/concepts/config-language/expressions/types_and_values.md
index 1f27c0b5ecac..70afaf790472 100644
--- a/docs/sources/flow/concepts/config-language/expressions/types_and_values.md
+++ b/docs/sources/flow/concepts/config-language/expressions/types_and_values.md
@@ -167,7 +167,7 @@ If the key isn't a valid identifier, you must wrap it in double quotes like a st
 }
 ```
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Don't confuse objects with blocks.
 
 * An _object_ is a value assigned to an [Attribute][]. You **must** use commas between key-value pairs on separate lines.
@@ -175,7 +175,7 @@ Don't confuse objects with blocks.
 
 [Attribute]: {{< relref "../syntax.md#Attributes" >}}
 [Block]: {{< relref "../syntax.md#Blocks" >}}
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Functions
 
diff --git a/docs/sources/flow/concepts/modules.md b/docs/sources/flow/concepts/modules.md
index 940357f30127..28ebbfb499cd 100644
--- a/docs/sources/flow/concepts/modules.md
+++ b/docs/sources/flow/concepts/modules.md
@@ -39,9 +39,9 @@ Module loader components are responsible for the following functions:
 
 Module loaders are typically called `module.LOADER_NAME`.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Some module loaders may not support running modules with arguments or exports.
-{{% /admonition %}}
+{{< /admonition >}}
 
 Refer to [Components][] for more information about the module loader components.
diff --git a/docs/sources/flow/get-started/_index.md b/docs/sources/flow/get-started/_index.md
index 80b48bfdaece..444b64f5afc5 100644
--- a/docs/sources/flow/get-started/_index.md
+++ b/docs/sources/flow/get-started/_index.md
@@ -20,6 +20,6 @@ weight: 50
 # Get started with {{% param "PRODUCT_NAME" %}}
 
 This section covers topics that help you get started with {{< param "PRODUCT_NAME" >}},
-including installation, running the agent, overview of deployment topologies, and more.
+including installation, running {{< param "PRODUCT_NAME" >}}, overview of deployment topologies, and more.
 
 {{< section >}}
diff --git a/docs/sources/flow/get-started/install/_index.md b/docs/sources/flow/get-started/install/_index.md
index dabb07857d74..25b9a5b2f101 100644
--- a/docs/sources/flow/get-started/install/_index.md
+++ b/docs/sources/flow/get-started/install/_index.md
@@ -29,9 +29,9 @@ The following architectures are supported:
 - macOS: AMD64 (Intel), ARM64 (Apple Silicon)
 - FreeBSD: AMD64
 
-{{% admonition type="note" %}}
-Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but is not recommended or supported.
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Installing {{< param "PRODUCT_NAME" >}} on other operating systems is possible, but isn't recommended or supported.
+{{< /admonition >}}
 
 {{< section >}}
diff --git a/docs/sources/flow/get-started/install/chef.md b/docs/sources/flow/get-started/install/chef.md
new file mode 100644
index 000000000000..ef348384a5ed
--- /dev/null
+++ b/docs/sources/flow/get-started/install/chef.md
@@ -0,0 +1,104 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/get-started/install/chef/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/chef/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/chef/
+- /docs/grafana-cloud/send-data/agent/flow/get-started/install/chef/
+
+canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/chef/
+description: Learn how to install Grafana Agent Flow with Chef
+menuTitle: Chef
+title: Install Grafana Agent Flow with Chef
+weight: 550
+---
+
+# Install {{% param "PRODUCT_NAME" %}} with Chef
+
+You can use Chef to install and manage {{< param "PRODUCT_NAME" >}}.
+
+## Before you begin
+
+- These steps assume you already have a working [Chef][] setup.
+- You can add the following resources to any new or existing recipe.
+- These tasks install {{< param "PRODUCT_NAME" >}} from the package repositories. The tasks target Linux systems from the following families:
+  - Debian (including Ubuntu)
+  - RedHat Enterprise Linux
+  - Amazon Linux
+  - Fedora
+
+## Steps
+
+To add {{< param "PRODUCT_NAME" >}} to a host:
+
+1. Add the following resources to your [Chef][] recipe to add the Grafana package repositories to your system:
+
+   ```ruby
+   if platform_family?('debian', 'rhel', 'amazon', 'fedora')
+     if platform_family?('debian')
+       remote_file '/etc/apt/keyrings/grafana.gpg' do
+         source 'https://apt.grafana.com/gpg.key'
+         mode '0644'
+         action :create
+       end
+
+       file '/etc/apt/sources.list.d/grafana.list' do
+         content "deb [signed-by=/etc/apt/keyrings/grafana.gpg] https://apt.grafana.com/ stable main"
+         mode '0644'
+         notifies :update, 'apt_update[update apt cache]', :immediately
+       end
+
+       apt_update 'update apt cache' do
+         action :nothing
+       end
+     elsif platform_family?('rhel', 'amazon', 'fedora')
+       yum_repository 'grafana' do
+         description 'grafana'
+         baseurl 'https://rpm.grafana.com/oss/rpm'
+         gpgcheck true
+         gpgkey 'https://rpm.grafana.com/gpg.key'
+         enabled true
+         action :create
+         notifies :run, 'execute[add-rhel-key]', :immediately
+       end
+
+       execute 'add-rhel-key' do
+         command "rpm --import https://rpm.grafana.com/gpg.key"
+         action :nothing
+       end
+     end
+   else
+     fail "The #{node['platform_family']} platform is not supported."
+   end
+   ```
+
+1. Add the following resources to install and enable the `grafana-agent-flow` service:
+
+   ```ruby
+   package 'grafana-agent-flow' do
+     action :install
+     flush_cache [ :before ] if platform_family?('amazon', 'rhel', 'fedora')
+     notifies :restart, 'service[grafana-agent-flow]', :delayed
+   end
+
+   service 'grafana-agent-flow' do
+     service_name 'grafana-agent-flow'
+     action [:enable, :start]
+   end
+   ```
+
+## Configuration
+
+The `grafana-agent-flow` package installs a default configuration file that doesn't send telemetry anywhere.
+
+The default configuration file location is `/etc/grafana-agent-flow.river`. You can replace this file with your own configuration or create a new configuration file for the service to use.
+
+## Next steps
+
+- [Configure {{< param "PRODUCT_NAME" >}}][Configure]
+
+[Chef]: https://www.chef.io/products/chef-infrastructure-management/
+
+{{% docs/reference %}}
+[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md"
+[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md"
+{{% /docs/reference %}}
diff --git a/docs/sources/flow/get-started/install/docker.md b/docs/sources/flow/get-started/install/docker.md
index c7884a6dc21b..c7e07b1b3b7a 100644
--- a/docs/sources/flow/get-started/install/docker.md
+++ b/docs/sources/flow/get-started/install/docker.md
@@ -57,10 +57,10 @@ Replace the following:
 You can modify the last line to change the arguments passed to the {{< param "PRODUCT_NAME" >}} binary.
 Refer to the documentation for [run][] for more information about the options available to the `run` command.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above.
 If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Run a Windows Docker container
 
@@ -82,10 +82,10 @@ Replace the following:
 You can modify the last line to change the arguments passed to the {{< param "PRODUCT_NAME" >}} binary.
 Refer to the documentation for [run][] for more information about the options available to the `run` command.
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 Make sure you pass `--server.http.listen-addr=0.0.0.0:12345` as an argument as shown in the example above.
 If you don't pass this argument, the [debugging UI][UI] won't be available outside of the Docker container.
-{{% /admonition %}}
+{{< /admonition >}}
 
 ## Verify
 
diff --git a/docs/sources/flow/get-started/install/kubernetes.md b/docs/sources/flow/get-started/install/kubernetes.md
index 9326fce4bf03..d045c7b5ce13 100644
--- a/docs/sources/flow/get-started/install/kubernetes.md
+++ b/docs/sources/flow/get-started/install/kubernetes.md
@@ -30,10 +30,10 @@ weight: 200
 
 ## Deploy
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for {{< param "PRODUCT_NAME" >}}.
 You can deploy {{< param "PRODUCT_ROOT_NAME" >}} either in static mode or flow mode. The Helm chart deploys {{< param "PRODUCT_NAME" >}} by default.
-{{% /admonition %}}
+{{< /admonition >}}
 
 To deploy {{< param "PRODUCT_ROOT_NAME" >}} on Kubernetes using Helm, run the following commands in a terminal window:
 
diff --git a/docs/sources/flow/get-started/install/macos.md b/docs/sources/flow/get-started/install/macos.md
index 9903e13ff632..c16f70e6d941 100644
--- a/docs/sources/flow/get-started/install/macos.md
+++ b/docs/sources/flow/get-started/install/macos.md
@@ -22,9 +22,9 @@ weight: 400
 
 You can install {{< param "PRODUCT_NAME" >}} on macOS with Homebrew .
 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/Homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`.
-{{% /admonition %}}
+{{< /admonition >}}

 ## Before you begin

diff --git a/docs/sources/flow/get-started/install/puppet.md b/docs/sources/flow/get-started/install/puppet.md
new file mode 100644
index 000000000000..db3fb2b4886d
--- /dev/null
+++ b/docs/sources/flow/get-started/install/puppet.md
@@ -0,0 +1,113 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/get-started/install/puppet/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/get-started/install/puppet/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/get-started/install/puppet/
+- /docs/grafana-cloud/send-data/agent/flow/get-started/install/puppet/
+
+canonical: https://grafana.com/docs/agent/latest/flow/get-started/install/puppet/
+description: Learn how to install Grafana Agent Flow with Puppet
+menuTitle: Puppet
+title: Install Grafana Agent Flow with Puppet
+weight: 560
+---
+
+# Install {{% param "PRODUCT_NAME" %}} with Puppet
+
+You can use Puppet to install and manage {{< param "PRODUCT_NAME" >}}.
+
+## Before you begin
+
+- These steps assume you already have a working [Puppet][] setup.
+- You can add the following manifest to any new or existing module.
+- The manifest installs {{< param "PRODUCT_NAME" >}} from the package repositories. It targets Linux systems from the following families:
+  - Debian (including Ubuntu)
+  - RedHat Enterprise Linux (including Fedora)
+
+## Steps
+
+To add {{< param "PRODUCT_NAME" >}} to a host:
+
+1. Ensure that the following module dependencies are declared and installed:
+
+   ```json
+   {
+     "name": "puppetlabs/apt",
+     "version_requirement": ">= 4.1.0 <= 7.0.0"
+   },
+   {
+     "name": "puppetlabs/yumrepo_core",
+     "version_requirement": "<= 2.0.0"
+   }
+   ```
+
+1. Create a new [Puppet][] manifest with the following class to add the Grafana package repositories, install the `grafana-agent-flow` package, and run the service:
+
+   ```ruby
+   class grafana_agent::grafana_agent_flow () {
+     case $::os['family'] {
+       'debian': {
+         apt::source { 'grafana':
+           location => 'https://apt.grafana.com/',
+           release  => '',
+           repos    => 'stable main',
+           key      => {
+             id     => 'B53AE77BADB630A683046005963FA27710458545',
+             source => 'https://apt.grafana.com/gpg.key',
+           },
+         } -> package { 'grafana-agent-flow':
+           require => Exec['apt_update'],
+         } -> service { 'grafana-agent-flow':
+           ensure    => running,
+           name      => 'grafana-agent-flow',
+           enable    => true,
+           subscribe => Package['grafana-agent-flow'],
+         }
+       }
+       'redhat': {
+         yumrepo { 'grafana':
+           ensure   => 'present',
+           name     => 'grafana',
+           descr    => 'grafana',
+           baseurl  => 'https://packages.grafana.com/oss/rpm',
+           gpgkey   => 'https://packages.grafana.com/gpg.key',
+           enabled  => '1',
+           gpgcheck => '1',
+           target   => '/etc/yum.repos.d/grafana.repo',
+         } -> package { 'grafana-agent-flow':
+         } -> service { 'grafana-agent-flow':
+           ensure    => running,
+           name      => 'grafana-agent-flow',
+           enable    => true,
+           subscribe => Package['grafana-agent-flow'],
+         }
+       }
+       default: {
+         fail("Unsupported OS family: (${$::os['family']})")
+       }
+     }
+   }
+   ```
+
+1. To use this class in a module, add the following line to the module's `init.pp` file:
+
+   ```ruby
+   include grafana_agent::grafana_agent_flow
+   ```
+
+## Configuration
+
+The `grafana-agent-flow` package installs a default configuration file that doesn't send telemetry anywhere.
+
+The default configuration file location is `/etc/grafana-agent-flow.river`. You can replace this file with your own configuration, or create a new configuration file for the service to use.
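+
+For example, a minimal configuration that tails local log files and forwards them to a Loki-compatible endpoint might look like the following sketch. The log path and endpoint URL are placeholders; replace them with your own values:
+
+```river
+local.file_match "example" {
+  // Placeholder glob; replace this with the log files you want to tail.
+  path_targets = [{"__path__" = "/var/log/*.log"}]
+}
+
+loki.source.file "example" {
+  targets    = local.file_match.example.targets
+  forward_to = [loki.write.example.receiver]
+}
+
+loki.write "example" {
+  endpoint {
+    // Placeholder URL; point this at your own Loki-compatible endpoint.
+    url = "https://loki.example.com/loki/api/v1/push"
+  }
+}
+```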
+
+## Next steps
+
+- [Configure {{< param "PRODUCT_NAME" >}}][Configure]
+
+[Puppet]: https://www.puppet.com/
+
+{{% docs/reference %}}
+[Configure]: "/docs/agent/ -> /docs/agent//flow/tasks/configure/configure-linux.md"
+[Configure]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tasks/configure/configure-linux.md"
+{{% /docs/reference %}}
diff --git a/docs/sources/flow/get-started/install/windows.md b/docs/sources/flow/get-started/install/windows.md
index 2be2fabc6019..a20ed3449792 100644
--- a/docs/sources/flow/get-started/install/windows.md
+++ b/docs/sources/flow/get-started/install/windows.md
@@ -52,7 +52,7 @@ To do a silent install of {{< param "PRODUCT_NAME" >}} on Windows, perform the f

 1. Run the following command in PowerShell or Command Prompt:

-   ```shell
+   ```cmd
    <PATH_TO_INSTALLER> /S
    ```

diff --git a/docs/sources/flow/get-started/run/binary.md b/docs/sources/flow/get-started/run/binary.md
index 7f9fda22ff77..0b9ac5b7d74a 100644
--- a/docs/sources/flow/get-started/run/binary.md
+++ b/docs/sources/flow/get-started/run/binary.md
@@ -46,9 +46,9 @@ Replace the following:

 You can set up and manage the standalone binary for {{< param "PRODUCT_NAME" >}} as a Linux systemd service.

-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 These steps assume you have a default systemd and {{< param "PRODUCT_NAME" >}} configuration.
-{{% /admonition %}}
+{{< /admonition >}}

 1. To create a new user called `grafana-agent-flow`, run the following command in a terminal window:

diff --git a/docs/sources/flow/reference/cli/convert.md b/docs/sources/flow/reference/cli/convert.md
index a9a3810ec3ee..3b44d662e87a 100644
--- a/docs/sources/flow/reference/cli/convert.md
+++ b/docs/sources/flow/reference/cli/convert.md
@@ -21,13 +21,13 @@ The `convert` command converts a supported configuration format to {{< param "PR

 Usage:

-* `AGENT_MODE=flow grafana-agent convert [FLAG ...] FILE_NAME`
-* `grafana-agent-flow convert [FLAG ...] FILE_NAME`
+* `AGENT_MODE=flow grafana-agent convert [<FLAG> ...] <FILE_NAME>`
+* `grafana-agent-flow convert [<FLAG> ...] <FILE_NAME>`

   Replace the following:

-  * `FLAG`: One or more flags that define the input and output of the command.
-  * `FILE_NAME`: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file.
+  * _`<FLAG>`_: One or more flags that define the input and output of the command.
+  * _`<FILE_NAME>`_: The {{< param "PRODUCT_ROOT_NAME" >}} configuration file.

 If the `FILE_NAME` argument isn't provided or if the `FILE_NAME` argument is
 equal to `-`, `convert` converts the contents of standard input. Otherwise,
@@ -70,7 +70,7 @@ where an output can still be generated. These can be bypassed using the

 ### Prometheus

-Using the `--source-format=prometheus` will convert the source config from
+Using the `--source-format=prometheus` flag converts the source configuration from
 [Prometheus v2.45](https://prometheus.io/docs/prometheus/2.45/configuration/configuration/)
 to {{< param "PRODUCT_NAME" >}} configuration.

diff --git a/docs/sources/flow/reference/cli/tools.md b/docs/sources/flow/reference/cli/tools.md
index b45e7f215a23..b9fb73a761bd 100644
--- a/docs/sources/flow/reference/cli/tools.md
+++ b/docs/sources/flow/reference/cli/tools.md
@@ -15,10 +15,10 @@ weight: 400

 The `tools` command contains command line tooling grouped by Flow component.

-{{% admonition type="caution" %}}
+{{< admonition type="caution" >}}
 Utilities in this command have no backward compatibility guarantees and may change or be removed between releases.
-{{% /admonition %}} +{{< /admonition >}} ## Subcommands diff --git a/docs/sources/flow/reference/compatibility/_index.md b/docs/sources/flow/reference/compatibility/_index.md index 633433ef6768..cdd9426cfb27 100644 --- a/docs/sources/flow/reference/compatibility/_index.md +++ b/docs/sources/flow/reference/compatibility/_index.md @@ -4,7 +4,7 @@ aliases: - /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/compatible-components/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/compatible-components/ - /docs/grafana-cloud/send-data/agent/flow/reference/compatible-components/ -canonical: https://grafana.com/docs/agent/latest/flow/reference/compatible-components/ +canonical: https://grafana.com/docs/agent/latest/flow/reference/compatibility/ description: Learn about which components are compatible with each other in Grafana Agent Flow title: Compatible components weight: 400 @@ -12,23 +12,19 @@ weight: 400 # Compatible components -This section provides an overview of _some_ of the possible connections between -compatible components in Grafana Agent Flow. +This section provides an overview of _some_ of the possible connections between compatible components in {{< param "PRODUCT_NAME" >}}. -For each common data type, we provide a list of compatible components -that can export or consume it. - -{{% admonition type="note" %}} +For each common data type, we provide a list of compatible components that can export or consume it. +{{< admonition type="note" >}} The type of export may not be the only requirement for chaining components together. The value of an attribute may matter as well as its type. -Please refer to each component's documentation for more details on what values are acceptable. +Refer to each component's documentation for more details on what values are acceptable. For example: * A Prometheus component may always expect an `"__address__"` label inside a list of targets. * A `string` argument may only accept certain values like "traceID" or "spanID". - -{{% /admonition %}} +{{< /admonition >}} ## Targets @@ -155,7 +151,7 @@ The following components, grouped by namespace, _consume_ Targets. ## Prometheus `MetricsReceiver` -The Prometheus metrics are sent between components using `MetricsReceiver`s. +The Prometheus metrics are sent between components using `MetricsReceiver`s. `MetricsReceiver`s are [capsules]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}}) that are exported by components that can receive Prometheus metrics. Components that can consume Prometheus metrics can be passed the `MetricsReceiver` as an argument. Use the @@ -181,7 +177,6 @@ The following components, grouped by namespace, _export_ Prometheus `MetricsRece ### Prometheus `MetricsReceiver` Consumers The following components, grouped by namespace, _consume_ Prometheus `MetricsReceiver`. - @@ -200,8 +195,6 @@ The following components, grouped by namespace, _consume_ Prometheus `MetricsRec - - ## Loki `LogsReceiver` `LogsReceiver` is a [capsule]({{< relref "../../concepts/config-language/expressions/types_and_values/#capsules" >}}) @@ -265,7 +258,6 @@ The following components, grouped by namespace, _consume_ Loki `LogsReceiver`. - ## OpenTelemetry `otelcol.Consumer` The OpenTelemetry data is sent between components using `otelcol.Consumer`s. @@ -298,6 +290,7 @@ The following components, grouped by namespace, _export_ OpenTelemetry `otelcol. 
- [otelcol.processor.k8sattributes]({{< relref "../components/otelcol.processor.k8sattributes.md" >}}) - [otelcol.processor.memory_limiter]({{< relref "../components/otelcol.processor.memory_limiter.md" >}}) - [otelcol.processor.probabilistic_sampler]({{< relref "../components/otelcol.processor.probabilistic_sampler.md" >}}) +- [otelcol.processor.resourcedetection]({{< relref "../components/otelcol.processor.resourcedetection.md" >}}) - [otelcol.processor.span]({{< relref "../components/otelcol.processor.span.md" >}}) - [otelcol.processor.tail_sampling]({{< relref "../components/otelcol.processor.tail_sampling.md" >}}) - [otelcol.processor.transform]({{< relref "../components/otelcol.processor.transform.md" >}}) @@ -326,6 +319,7 @@ The following components, grouped by namespace, _consume_ OpenTelemetry `otelcol - [otelcol.processor.k8sattributes]({{< relref "../components/otelcol.processor.k8sattributes.md" >}}) - [otelcol.processor.memory_limiter]({{< relref "../components/otelcol.processor.memory_limiter.md" >}}) - [otelcol.processor.probabilistic_sampler]({{< relref "../components/otelcol.processor.probabilistic_sampler.md" >}}) +- [otelcol.processor.resourcedetection]({{< relref "../components/otelcol.processor.resourcedetection.md" >}}) - [otelcol.processor.span]({{< relref "../components/otelcol.processor.span.md" >}}) - [otelcol.processor.tail_sampling]({{< relref "../components/otelcol.processor.tail_sampling.md" >}}) - [otelcol.processor.transform]({{< relref "../components/otelcol.processor.transform.md" >}}) @@ -341,8 +335,6 @@ The following components, grouped by namespace, _consume_ OpenTelemetry `otelcol - - ## Pyroscope `ProfilesReceiver` The Pyroscope profiles are sent between components using `ProfilesReceiver`s. @@ -376,4 +368,3 @@ The following components, grouped by namespace, _consume_ Pyroscope `ProfilesRec {{< /collapse >}} - diff --git a/docs/sources/flow/reference/components/discovery.azure.md b/docs/sources/flow/reference/components/discovery.azure.md index 83eceabdf7a6..94b38bbec2a0 100644 --- a/docs/sources/flow/reference/components/discovery.azure.md +++ b/docs/sources/flow/reference/components/discovery.azure.md @@ -158,11 +158,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.consul.md b/docs/sources/flow/reference/components/discovery.consul.md index c63f94b8017c..e45e6a3ec040 100644 --- a/docs/sources/flow/reference/components/discovery.consul.md +++ b/docs/sources/flow/reference/components/discovery.consul.md @@ -177,11 +177,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.consulagent.md b/docs/sources/flow/reference/components/discovery.consulagent.md index df923fed4496..5557410188b1 100644 --- a/docs/sources/flow/reference/components/discovery.consulagent.md +++ b/docs/sources/flow/reference/components/discovery.consulagent.md @@ -138,11 +138,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.digitalocean.md b/docs/sources/flow/reference/components/discovery.digitalocean.md index 2a64ba7f6bec..a24eabaa0803 100644 --- a/docs/sources/flow/reference/components/discovery.digitalocean.md +++ b/docs/sources/flow/reference/components/discovery.digitalocean.md @@ -128,11 +128,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.dns.md b/docs/sources/flow/reference/components/discovery.dns.md index d2f0217b1d73..70fb3a64b9e1 100644 --- a/docs/sources/flow/reference/components/discovery.dns.md +++ b/docs/sources/flow/reference/components/discovery.dns.md @@ -103,11 +103,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.docker.md b/docs/sources/flow/reference/components/discovery.docker.md index 4d6ce94d557f..5a8518f22873 100644 --- a/docs/sources/flow/reference/components/discovery.docker.md +++ b/docs/sources/flow/reference/components/discovery.docker.md @@ -225,11 +225,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.dockerswarm.md b/docs/sources/flow/reference/components/discovery.dockerswarm.md index 58c065fb06eb..c1a7f8616cee 100644 --- a/docs/sources/flow/reference/components/discovery.dockerswarm.md +++ b/docs/sources/flow/reference/components/discovery.dockerswarm.md @@ -248,11 +248,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.ec2.md b/docs/sources/flow/reference/components/discovery.ec2.md index 7f01ae48c6e0..cc7f49259594 100644 --- a/docs/sources/flow/reference/components/discovery.ec2.md +++ b/docs/sources/flow/reference/components/discovery.ec2.md @@ -175,11 +175,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.eureka.md b/docs/sources/flow/reference/components/discovery.eureka.md index 70ab3f8f666d..93c76d9d09f8 100644 --- a/docs/sources/flow/reference/components/discovery.eureka.md +++ b/docs/sources/flow/reference/components/discovery.eureka.md @@ -162,11 +162,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.file.md b/docs/sources/flow/reference/components/discovery.file.md index c8493e01e62a..a78c39feabf7 100644 --- a/docs/sources/flow/reference/components/discovery.file.md +++ b/docs/sources/flow/reference/components/discovery.file.md @@ -182,11 +182,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.gce.md b/docs/sources/flow/reference/components/discovery.gce.md index 5752a4ce51b1..1a662bec2911 100644 --- a/docs/sources/flow/reference/components/discovery.gce.md +++ b/docs/sources/flow/reference/components/discovery.gce.md @@ -122,11 +122,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.hetzner.md b/docs/sources/flow/reference/components/discovery.hetzner.md index c6922e685f66..f917f8417a84 100644 --- a/docs/sources/flow/reference/components/discovery.hetzner.md +++ b/docs/sources/flow/reference/components/discovery.hetzner.md @@ -186,11 +186,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.http.md b/docs/sources/flow/reference/components/discovery.http.md index 50ecf42dcc06..80639fe7077f 100644 --- a/docs/sources/flow/reference/components/discovery.http.md +++ b/docs/sources/flow/reference/components/discovery.http.md @@ -192,11 +192,9 @@ discovery.http "dynamic_targets" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.ionos.md b/docs/sources/flow/reference/components/discovery.ionos.md index 1c619a1641ac..378556ad7886 100644 --- a/docs/sources/flow/reference/components/discovery.ionos.md +++ b/docs/sources/flow/reference/components/discovery.ionos.md @@ -161,11 +161,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.kubelet.md b/docs/sources/flow/reference/components/discovery.kubelet.md index 7ef29244a01e..bf0e1085dcde 100644 --- a/docs/sources/flow/reference/components/discovery.kubelet.md +++ b/docs/sources/flow/reference/components/discovery.kubelet.md @@ -206,11 +206,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.kubernetes.md b/docs/sources/flow/reference/components/discovery.kubernetes.md index 1d4b2f9210c5..8745ff5ddd7a 100644 --- a/docs/sources/flow/reference/components/discovery.kubernetes.md +++ b/docs/sources/flow/reference/components/discovery.kubernetes.md @@ -466,10 +466,10 @@ Replace the following: This example limits the search to pods on the same node as this {{< param "PRODUCT_ROOT_NAME" >}}. This configuration could be useful if you are running {{< param "PRODUCT_ROOT_NAME" >}} as a DaemonSet. 
-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 This example assumes you have used the Helm chart to deploy {{< param "PRODUCT_NAME" >}} in Kubernetes and sets `HOSTNAME` to the Kubernetes host name.
 If you have a custom Kubernetes deployment, you must adapt this example to your configuration.
-{{% /admonition %}}
+{{< /admonition >}}

 ```river
 discovery.kubernetes "k8s_pods" {
@@ -510,11 +510,9 @@ Replace the following:

 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})

-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
diff --git a/docs/sources/flow/reference/components/discovery.kuma.md b/docs/sources/flow/reference/components/discovery.kuma.md
index c498753f58ab..8763bc2eb357 100644
--- a/docs/sources/flow/reference/components/discovery.kuma.md
+++ b/docs/sources/flow/reference/components/discovery.kuma.md
@@ -145,11 +145,9 @@ Replace the following:

 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})

-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
diff --git a/docs/sources/flow/reference/components/discovery.lightsail.md b/docs/sources/flow/reference/components/discovery.lightsail.md
index 81bdb0c706b9..22868c58faeb 100644
--- a/docs/sources/flow/reference/components/discovery.lightsail.md
+++ b/docs/sources/flow/reference/components/discovery.lightsail.md
@@ -108,11 +108,9 @@ Replace the following:

 - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}})

-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
diff --git a/docs/sources/flow/reference/components/discovery.linode.md b/docs/sources/flow/reference/components/discovery.linode.md
index 77d01dbdf4e2..6a5733c9e6cc 100644
--- a/docs/sources/flow/reference/components/discovery.linode.md
+++ b/docs/sources/flow/reference/components/discovery.linode.md
@@ -20,9 +20,9 @@ discovery.linode "LABEL" {
 }
 ```

-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
 The Linode APIv4 token must be created with the scopes: `linodes:read_only`, `ips:read_only`, and `events:read_only`.
-{{% /admonition %}} +{{< /admonition >}} ## Arguments @@ -185,11 +185,9 @@ prometheus.remote_write "demo" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.marathon.md b/docs/sources/flow/reference/components/discovery.marathon.md index b19ddb321c2c..43c50ab468de 100644 --- a/docs/sources/flow/reference/components/discovery.marathon.md +++ b/docs/sources/flow/reference/components/discovery.marathon.md @@ -155,11 +155,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.nerve.md b/docs/sources/flow/reference/components/discovery.nerve.md index 1334f6dea8e8..d8c7fc24bbe2 100644 --- a/docs/sources/flow/reference/components/discovery.nerve.md +++ b/docs/sources/flow/reference/components/discovery.nerve.md @@ -106,11 +106,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.nomad.md b/docs/sources/flow/reference/components/discovery.nomad.md index aebd128bb320..14c51bb6c72f 100644 --- a/docs/sources/flow/reference/components/discovery.nomad.md +++ b/docs/sources/flow/reference/components/discovery.nomad.md @@ -156,11 +156,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.openstack.md b/docs/sources/flow/reference/components/discovery.openstack.md index 83df98d8c41c..13b6f4924232 100644 --- a/docs/sources/flow/reference/components/discovery.openstack.md +++ b/docs/sources/flow/reference/components/discovery.openstack.md @@ -166,11 +166,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.ovhcloud.md b/docs/sources/flow/reference/components/discovery.ovhcloud.md index 453fcb3c1cfc..a433f4544a9a 100644 --- a/docs/sources/flow/reference/components/discovery.ovhcloud.md +++ b/docs/sources/flow/reference/components/discovery.ovhcloud.md @@ -155,11 +155,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.process.md b/docs/sources/flow/reference/components/discovery.process.md index f0d1eb184db2..839948d3d65b 100644 --- a/docs/sources/flow/reference/components/discovery.process.md +++ b/docs/sources/flow/reference/components/discovery.process.md @@ -15,9 +15,9 @@ title: discovery.process `discovery.process` discovers processes running on the local Linux OS. -{{% admonition type="note" %}} +{{< admonition type="note" >}} To use the `discovery.process` component you must run {{< param "PRODUCT_NAME" >}} as root and inside host PID namespace. -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -207,11 +207,9 @@ discovery.process "all" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/discovery.puppetdb.md b/docs/sources/flow/reference/components/discovery.puppetdb.md index a83d8454723c..c4e984bcd440 100644 --- a/docs/sources/flow/reference/components/discovery.puppetdb.md +++ b/docs/sources/flow/reference/components/discovery.puppetdb.md @@ -166,11 +166,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.relabel.md b/docs/sources/flow/reference/components/discovery.relabel.md index fb0928359273..5269f662f13f 100644 --- a/docs/sources/flow/reference/components/discovery.relabel.md +++ b/docs/sources/flow/reference/components/discovery.relabel.md @@ -135,11 +135,9 @@ discovery.relabel "keep_backend_only" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.scaleway.md b/docs/sources/flow/reference/components/discovery.scaleway.md index fc3ec8867212..f65aa6941346 100644 --- a/docs/sources/flow/reference/components/discovery.scaleway.md +++ b/docs/sources/flow/reference/components/discovery.scaleway.md @@ -183,11 +183,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.serverset.md b/docs/sources/flow/reference/components/discovery.serverset.md index 7eb43b5ee11d..a986c2966c18 100644 --- a/docs/sources/flow/reference/components/discovery.serverset.md +++ b/docs/sources/flow/reference/components/discovery.serverset.md @@ -104,11 +104,9 @@ prometheus.remote_write "default" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.triton.md b/docs/sources/flow/reference/components/discovery.triton.md index f48ae7f65b17..9a0c48d260cd 100644 --- a/docs/sources/flow/reference/components/discovery.triton.md +++ b/docs/sources/flow/reference/components/discovery.triton.md @@ -138,11 +138,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/discovery.uyuni.md b/docs/sources/flow/reference/components/discovery.uyuni.md index 42b77e8952b6..25909d8d5217 100644 --- a/docs/sources/flow/reference/components/discovery.uyuni.md +++ b/docs/sources/flow/reference/components/discovery.uyuni.md @@ -132,11 +132,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/faro.receiver.md b/docs/sources/flow/reference/components/faro.receiver.md index 3c15253f126a..72c95bfc195e 100644 --- a/docs/sources/flow/reference/components/faro.receiver.md +++ b/docs/sources/flow/reference/components/faro.receiver.md @@ -278,11 +278,9 @@ Replace the following: - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/local.file_match.md b/docs/sources/flow/reference/components/local.file_match.md index 8c3ff3a43062..c9a083198718 100644 --- a/docs/sources/flow/reference/components/local.file_match.md +++ b/docs/sources/flow/reference/components/local.file_match.md @@ -158,11 +158,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.echo.md b/docs/sources/flow/reference/components/loki.echo.md index 756ffa00ee18..8109de1b96d0 100644 --- a/docs/sources/flow/reference/components/loki.echo.md +++ b/docs/sources/flow/reference/components/loki.echo.md @@ -76,11 +76,9 @@ loki.echo "example" { } - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.process.md b/docs/sources/flow/reference/components/loki.process.md index d1c8ef723bd9..91d25d7fdeee 100644 --- a/docs/sources/flow/reference/components/loki.process.md +++ b/docs/sources/flow/reference/components/loki.process.md @@ -382,7 +382,7 @@ following key-value pair to the set of extracted data. username: agent ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} Due to a limitation of the upstream jmespath library, you must wrap any string that contains a hyphen `-` in quotes so that it's not considered a numerical expression. @@ -394,7 +394,7 @@ You can use one of two options to circumvent this issue: 1. An escaped double quote. For example: `http_user_agent = "\"request_User-Agent\""` 1. A backtick quote. For example: ``http_user_agent = `"request_User-Agent"` `` -{{% /admonition %}} +{{< /admonition >}} ### stage.label_drop block @@ -581,9 +581,9 @@ The following arguments are supported: | `action` | `string` | The action to take when the selector matches the log line. Supported values are `"keep"` and `"drop"` | `"keep"` | no | | `drop_counter_reason` | `string` | A custom reason to report for dropped lines. | `"match_stage"` | no | -{{% admonition type="note" %}} +{{< admonition type="note" >}} The filters do not include label filter expressions such as `| label == "foobar"`. -{{% /admonition %}} +{{< /admonition >}} The `stage.match` block supports a number of `stage.*` inner blocks, like the top-level block. 
These are used to construct the nested set of stages to run if the @@ -1758,11 +1758,9 @@ loki.process "local" { - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.relabel.md b/docs/sources/flow/reference/components/loki.relabel.md index 4344af151b22..f60f5b2d40b4 100644 --- a/docs/sources/flow/reference/components/loki.relabel.md +++ b/docs/sources/flow/reference/components/loki.relabel.md @@ -124,11 +124,9 @@ loki.relabel "keep_error_only" { - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.api.md b/docs/sources/flow/reference/components/loki.source.api.md index afc2f3dad112..4c3f1fce87f3 100644 --- a/docs/sources/flow/reference/components/loki.source.api.md +++ b/docs/sources/flow/reference/components/loki.source.api.md @@ -126,11 +126,9 @@ loki.source.api "loki_push_api" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.awsfirehose.md b/docs/sources/flow/reference/components/loki.source.awsfirehose.md index 9b1d2c6d75c5..e621b750357d 100644 --- a/docs/sources/flow/reference/components/loki.source.awsfirehose.md +++ b/docs/sources/flow/reference/components/loki.source.awsfirehose.md @@ -120,9 +120,9 @@ The following blocks are supported inside the definition of `loki.source.awsfire ## Debug metrics The following are some of the metrics that are exposed when this component is used. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The metrics include labels such as `status_code` where relevant, which you can use to measure request success rates. -{{%/admonition %}} +{{< /admonition >}} - `loki_source_awsfirehose_request_errors` (counter): Count of errors while receiving a request. 
- `loki_source_awsfirehose_record_errors` (counter): Count of errors while decoding an individual record. @@ -206,11 +206,9 @@ loki.relabel "logging_origin" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md index fcbe22aa4880..f8aad7676b23 100644 --- a/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md +++ b/docs/sources/flow/reference/components/loki.source.azure_event_hubs.md @@ -143,11 +143,9 @@ loki.write "example" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.cloudflare.md b/docs/sources/flow/reference/components/loki.source.cloudflare.md index cee51de6a541..0ce0312fcdeb 100644 --- a/docs/sources/flow/reference/components/loki.source.cloudflare.md +++ b/docs/sources/flow/reference/components/loki.source.cloudflare.md @@ -218,11 +218,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.docker.md b/docs/sources/flow/reference/components/loki.source.docker.md index 02bf03175b20..79a1204199e1 100644 --- a/docs/sources/flow/reference/components/loki.source.docker.md +++ b/docs/sources/flow/reference/components/loki.source.docker.md @@ -172,11 +172,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.file.md b/docs/sources/flow/reference/components/loki.source.file.md index edb407593c1f..a581ac0da043 100644 --- a/docs/sources/flow/reference/components/loki.source.file.md +++ b/docs/sources/flow/reference/components/loki.source.file.md @@ -17,9 +17,9 @@ title: loki.source.file Multiple `loki.source.file` components can be specified by giving them different labels. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `loki.source.file` does not handle file discovery. You can use `local.file_match` for file discovery. Refer to the [File Globbing](#file-globbing) example for more information. -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -245,11 +245,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.gcplog.md b/docs/sources/flow/reference/components/loki.source.gcplog.md index 2ce88f73f398..4cb7cb59cbc0 100644 --- a/docs/sources/flow/reference/components/loki.source.gcplog.md +++ b/docs/sources/flow/reference/components/loki.source.gcplog.md @@ -202,11 +202,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.gelf.md b/docs/sources/flow/reference/components/loki.source.gelf.md index ac5796051be5..ccb0f7b37968 100644 --- a/docs/sources/flow/reference/components/loki.source.gelf.md +++ b/docs/sources/flow/reference/components/loki.source.gelf.md @@ -98,11 +98,9 @@ loki.write "endpoint" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.heroku.md b/docs/sources/flow/reference/components/loki.source.heroku.md index 8f2c01cea68c..df0df9a7bfc2 100644 --- a/docs/sources/flow/reference/components/loki.source.heroku.md +++ b/docs/sources/flow/reference/components/loki.source.heroku.md @@ -153,11 +153,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.journal.md b/docs/sources/flow/reference/components/loki.source.journal.md index 0448bd572d74..f80294e331ff 100644 --- a/docs/sources/flow/reference/components/loki.source.journal.md +++ b/docs/sources/flow/reference/components/loki.source.journal.md @@ -110,11 +110,9 @@ loki.write "endpoint" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.kafka.md b/docs/sources/flow/reference/components/loki.source.kafka.md index eb5e04217298..7f62ac3ff75e 100644 --- a/docs/sources/flow/reference/components/loki.source.kafka.md +++ b/docs/sources/flow/reference/components/loki.source.kafka.md @@ -183,11 +183,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes.md b/docs/sources/flow/reference/components/loki.source.kubernetes.md index e9d19237aef6..a14e305d6d39 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes.md +++ b/docs/sources/flow/reference/components/loki.source.kubernetes.md @@ -210,11 +210,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. 
Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md b/docs/sources/flow/reference/components/loki.source.kubernetes_events.md index 4447a915cfae..49a9f8b7d824 100644 --- a/docs/sources/flow/reference/components/loki.source.kubernetes_events.md +++ b/docs/sources/flow/reference/components/loki.source.kubernetes_events.md @@ -180,11 +180,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.podlogs.md b/docs/sources/flow/reference/components/loki.source.podlogs.md index 5e957c6ead09..2559fd95e055 100644 --- a/docs/sources/flow/reference/components/loki.source.podlogs.md +++ b/docs/sources/flow/reference/components/loki.source.podlogs.md @@ -299,11 +299,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.syslog.md b/docs/sources/flow/reference/components/loki.source.syslog.md index 017cc43ee0c5..c1c0900d4835 100644 --- a/docs/sources/flow/reference/components/loki.source.syslog.md +++ b/docs/sources/flow/reference/components/loki.source.syslog.md @@ -162,11 +162,9 @@ loki.write "local" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.source.windowsevent.md b/docs/sources/flow/reference/components/loki.source.windowsevent.md index bb41a62cc3eb..ae706b17c5d2 100644 --- a/docs/sources/flow/reference/components/loki.source.windowsevent.md +++ b/docs/sources/flow/reference/components/loki.source.windowsevent.md @@ -84,11 +84,9 @@ loki.write "endpoint" { - Components that export [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/loki.write.md b/docs/sources/flow/reference/components/loki.write.md index 75aad04f3f2a..946f72c5d324 100644 --- a/docs/sources/flow/reference/components/loki.write.md +++ b/docs/sources/flow/reference/components/loki.write.md @@ -243,11 +243,9 @@ Any labels that start with `__` will be removed before sending to the endpoint. - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md index d5ba0e340255..fd5639a03bf4 100644 --- a/docs/sources/flow/reference/components/mimir.rules.kubernetes.md +++ b/docs/sources/flow/reference/components/mimir.rules.kubernetes.md @@ -47,18 +47,19 @@ mimir.rules.kubernetes "LABEL" { `mimir.rules.kubernetes` supports the following arguments: -Name | Type | Description | Default | Required --------------------------|------------|---------------------------------------------------------------------------------|---------|--------- -`address` | `string` | URL of the Mimir ruler. | | yes -`tenant_id` | `string` | Mimir tenant ID. | | no -`use_legacy_routes` | `bool` | Whether to use deprecated ruler API endpoints. | false | no -`sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no -`mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no -`bearer_token` | `secret` | Bearer token to authenticate with. | | no -`bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no -`proxy_url` | `string` | HTTP proxy to proxy requests through. | | no -`follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no -`enable_http2` | `bool` | Whether HTTP2 is supported for requests. 
| `true` | no +| Name | Type | Description | Default | Required | +| ------------------------ | ---------- | ------------------------------------------------------------------------------- | ------------- | -------- | +| `address` | `string` | URL of the Mimir ruler. | | yes | +| `tenant_id` | `string` | Mimir tenant ID. | | no | +| `use_legacy_routes` | `bool` | Whether to use [deprecated][gem-2_2] ruler API endpoints. | false | no | +| `prometheus_http_prefix` | `string` | Path prefix for [Mimir's Prometheus endpoint][gem-path-prefix]. | `/prometheus` | no | +| `sync_interval` | `duration` | Amount of time between reconciliations with Mimir. | "30s" | no | +| `mimir_namespace_prefix` | `string` | Prefix used to differentiate multiple {{< param "PRODUCT_NAME" >}} deployments. | "agent" | no | +| `bearer_token` | `secret` | Bearer token to authenticate with. | | no | +| `bearer_token_file` | `string` | File containing a bearer token to authenticate with. | | no | +| `proxy_url` | `string` | HTTP proxy to proxy requests through. | | no | +| `follow_redirects` | `bool` | Whether redirects returned by the server should be followed. | `true` | no | +| `enable_http2` | `bool` | Whether HTTP2 is supported for requests. | `true` | no | At most one of the following can be provided: - [`bearer_token` argument](#arguments). @@ -81,6 +82,13 @@ The `mimir_namespace_prefix` argument can be used to separate the rules managed by multiple {{< param "PRODUCT_NAME" >}} deployments across your infrastructure. It should be set to a unique value for each deployment. +If `use_legacy_routes` is set to `true`, `mimir.rules.kubernetes` contacts Mimir on a `/api/v1/rules` endpoint. + +If `prometheus_http_prefix` is set to `/mimir`, `mimir.rules.kubernetes` contacts Mimir on a `/mimir/config/v1/rules` endpoint. +This is useful if you configure Mimir to use a different [prefix][gem-path-prefix] for its Prometheus endpoints than the default one. + +`prometheus_http_prefix` is ignored if `use_legacy_routes` is set to `true`. + ## Blocks The following blocks are supported inside the definition of diff --git a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md index ab3e55b5521f..8feb3dbff49e 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md +++ b/docs/sources/flow/reference/components/otelcol.connector.servicegraph.md @@ -231,11 +231,9 @@ traces_service_graph_request_failed_total{client="shop-backend",client_http_meth - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md b/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md index 5811b64b7733..1c49cd59554d 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md +++ b/docs/sources/flow/reference/components/otelcol.connector.spanlogs.md @@ -291,11 +291,9 @@ For an input trace like this... - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md index 23c2eaa0a24d..80004502676a 100644 --- a/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md +++ b/docs/sources/flow/reference/components/otelcol.connector.spanmetrics.md @@ -751,13 +751,13 @@ The example below uses the [merge_maps][] OTTL function. If the resource attributes are not treated in either of the ways described above, an error such as this one could be logged by `prometheus.remote_write`: `the sample has been rejected because another sample with the same timestamp, but a different value, has already been ingested (err-mimir-sample-duplicate-timestamp)`. -{{% admonition type="note" %}} +{{< admonition type="note" >}} In order for a Prometheus `target_info` metric to be generated, the incoming spans resource scope attributes must contain `service.name` and `service.instance.id` attributes. The `target_info` metric will be generated for each resource scope, while OpenTelemetry metric names and attributes will be normalized to be compliant with Prometheus naming rules. -{{% /admonition %}} +{{< /admonition >}} [merge_maps]: https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/{{< param "OTEL_VERSION" >}}/pkg/ottl/ottlfuncs/README.md#merge_maps [prom-data-model]: https://prometheus.io/docs/concepts/data_model/ @@ -774,11 +774,9 @@ metric names and attributes will be normalized to be compliant with Prometheus n - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md index 4552adce44ce..59283441f97c 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.loadbalancing.md @@ -961,11 +961,9 @@ k3d cluster delete grafana-agent-lb-test - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.exporter.logging.md b/docs/sources/flow/reference/components/otelcol.exporter.logging.md index c1e4c8413948..739f717426ea 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.logging.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.logging.md @@ -115,11 +115,9 @@ otelcol.exporter.logging "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.loki.md b/docs/sources/flow/reference/components/otelcol.exporter.loki.md index 9a314c3b5aae..ae14eba57f74 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.loki.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.loki.md @@ -171,11 +171,9 @@ loki.write "local" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md index a03970cb3561..69f2700659aa 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlp.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.otlp.md @@ -222,11 +222,9 @@ otelcol.auth.basic "grafana_cloud_tempo" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md index 14d0c5112fad..a6cb0e4c5832 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.otlphttp.md @@ -163,11 +163,9 @@ otelcol.exporter.otlphttp "tempo" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md index 4285f34cc799..bed0cdd6e48c 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md @@ -58,7 +58,7 @@ When `include_scope_labels` is `true` the `otel_scope_name` and When `include_target_info` is true, OpenTelemetry Collector resources are converted into `target_info` metrics. -{{% admonition type="note" %}} +{{< admonition type="note" >}} OTLP metrics can have a lot of resource attributes. Setting `resource_to_telemetry_conversion` to `true` would convert all of them to Prometheus labels, which may not be what you want. @@ -68,7 +68,7 @@ See [Creating Prometheus labels from OTLP resource attributes][] for an example. [Creating Prometheus labels from OTLP resource attributes]: #creating-prometheus-labels-from-otlp-resource-attributes -{{% /admonition %}} +{{< /admonition >}} ## Exported fields @@ -183,11 +183,9 @@ prometheus.remote_write "mimir" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. 
Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.attributes.md b/docs/sources/flow/reference/components/otelcol.processor.attributes.md index ae1b1eafe555..febcbb934664 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.attributes.md +++ b/docs/sources/flow/reference/components/otelcol.processor.attributes.md @@ -166,11 +166,11 @@ For example, adding a `span_names` filter could cause the component to error if The `exclude` block provides an option to exclude data from being fed into the [action] blocks based on the properties of a span, log, or metric records. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Signals excluded by the `exclude` block will still be propagated to downstream components as-is. If you would like to not propagate certain signals to downstream components, consider a processor such as [otelcol.processor.tail_sampling]({{< relref "./otelcol.processor.tail_sampling.md" >}}). -{{% /admonition %}} +{{< /admonition >}} {{< docs/shared lookup="flow/reference/components/match-properties-block.md" source="agent" version="" >}} @@ -646,11 +646,9 @@ otelcol.processor.attributes "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.batch.md b/docs/sources/flow/reference/components/otelcol.processor.batch.md index 7a8eff522ff5..cf8b42d534f8 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.batch.md +++ b/docs/sources/flow/reference/components/otelcol.processor.batch.md @@ -239,11 +239,9 @@ otelcol.exporter.otlp "production" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.discovery.md b/docs/sources/flow/reference/components/otelcol.processor.discovery.md index 9d9b7c05e3a3..258f821a789d 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.discovery.md +++ b/docs/sources/flow/reference/components/otelcol.processor.discovery.md @@ -17,15 +17,15 @@ of labels for each discovered target. `otelcol.processor.discovery` adds resource attributes to spans which have a hostname matching the one in the `__address__` label provided by the `discovery.*` component. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.discovery` is a custom component unrelated to any processors from the OpenTelemetry Collector. -{{% /admonition %}} +{{< /admonition >}} Multiple `otelcol.processor.discovery` components can be specified by giving them different labels. -{{% admonition type="note" %}} +{{< admonition type="note" >}} It can be difficult to follow [OpenTelemetry semantic conventions][OTEL sem conv] when adding resource attributes via `otelcol.processor.discovery`: * `discovery.relabel` and most `discovery.*` processes such as `discovery.kubernetes` @@ -47,7 +47,7 @@ from Static mode's `prom_sd_operation_type`/`prom_sd_pod_associations` [configur [Prometheus data model]: https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels [OTEL sem conv]: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/README.md [Traces]: {{< relref "../../../static/configuration/traces-config.md" >}} -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -205,11 +205,9 @@ otelcol.processor.discovery "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.processor.filter.md b/docs/sources/flow/reference/components/otelcol.processor.filter.md index 49a11028a80c..7fe282407be5 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.filter.md +++ b/docs/sources/flow/reference/components/otelcol.processor.filter.md @@ -39,22 +39,22 @@ the following metrics-only functions are used exclusively by the processor: * `end_time_unix_nano - start_time_unix_nano` * `sum([1, 2, 3, 4]) + (10 / 1) - 1` -{{% admonition type="note" %}} +{{< admonition type="note" >}} Raw River strings can be used to write OTTL statements. For example, the OTTL statement `attributes["grpc"] == true` is written in River as \`attributes["grpc"] == true\` -{{% /admonition %}} +{{< /admonition >}} -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.filter` is a wrapper over the upstream OpenTelemetry Collector `filter` processor. If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.filter` components by giving them different labels. 
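+
+As a minimal sketch, a raw River string keeps the OTTL statement free of extra escaping. The
+`error_mode` value and the `otelcol.exporter.otlp.default` consumer below are assumed purely
+for illustration:
+
+```river
+otelcol.processor.filter "default" {
+  // Ignore OTTL statements that fail to evaluate instead of propagating the error.
+  error_mode = "ignore"
+
+  traces {
+    // Drop spans whose "grpc" attribute is true.
+    span = [
+      `attributes["grpc"] == true`,
+    ]
+  }
+
+  output {
+    traces = [otelcol.exporter.otlp.default.input]
+  }
+}
+```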
-{{% admonition type="warning" %}} +{{< admonition type="warning" >}} Exercise caution when using `otelcol.processor.filter`: - Make sure you understand schema/format of the incoming data and test the configuration thoroughly. @@ -64,7 +64,7 @@ Exercise caution when using `otelcol.processor.filter`: if the log references the dropped span. [Orphaned Telemetry]: https://github.com/open-telemetry/opentelemetry-collector/blob/v0.85.0/docs/standard-warnings.md#orphaned-telemetry -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -316,11 +316,9 @@ Some values in the River strings are [escaped][river-strings]: - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md b/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md index 6e16dcebcd48..8d3d9601065c 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md +++ b/docs/sources/flow/reference/components/otelcol.processor.k8sattributes.md @@ -14,11 +14,11 @@ title: otelcol.processor.k8sattributes `otelcol.processor.k8sattributes` accepts telemetry data from other `otelcol` components and adds Kubernetes metadata to the resource attributes of spans, logs, or metrics. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.k8sattributes` is a wrapper over the upstream OpenTelemetry Collector `k8sattributes` processor. If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.k8sattributes` components by giving them different labels. @@ -422,11 +422,9 @@ prometheus.remote_write "mimir" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md b/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md index 9d1528adf70d..30a9f1614149 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md +++ b/docs/sources/flow/reference/components/otelcol.processor.memory_limiter.md @@ -121,11 +121,9 @@ information. 
- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md b/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md index a76c85b2a21b..de866428c515 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md +++ b/docs/sources/flow/reference/components/otelcol.processor.probabilistic_sampler.md @@ -15,11 +15,11 @@ title: otelcol.processor.probabilistic_sampler `otelcol.processor.probabilistic_sampler` accepts logs and traces data from other otelcol components and applies probabilistic sampling based on configuration options. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.probabilistic_sampler` is a wrapper over the upstream OpenTelemetry Collector Contrib `probabilistic_sampler` processor. If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.probabilistic_sampler` components by giving them different labels. @@ -157,11 +157,9 @@ otelcol.processor.probabilistic_sampler "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}}
\ No newline at end of file
diff --git a/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md b/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md
new file mode 100644
index 000000000000..d6d476c481aa
--- /dev/null
+++ b/docs/sources/flow/reference/components/otelcol.processor.resourcedetection.md
@@ -0,0 +1,931 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/reference/components/otelcol.processor.resourcedetection/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/reference/components/otelcol.processor.resourcedetection/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/reference/components/otelcol.processor.resourcedetection/
+- /docs/grafana-cloud/send-data/agent/flow/reference/components/otelcol.processor.resourcedetection/
+canonical: https://grafana.com/docs/agent/latest/flow/reference/components/otelcol.processor.resourcedetection/
+labels:
+  stage: beta
+title: otelcol.processor.resourcedetection
+description: Learn about otelcol.processor.resourcedetection
+---
+
+# otelcol.processor.resourcedetection
+
+{{< docs/shared lookup="flow/stability/beta.md" source="agent" version="" >}}
+
+`otelcol.processor.resourcedetection` detects resource information from the host
+in a format that conforms to the [OpenTelemetry resource semantic conventions](https://github.com/open-telemetry/opentelemetry-specification/tree/main/specification/resource/semantic_conventions/), and appends or
+overrides the resource values in the telemetry data with this information.
+
+{{< admonition type="note" >}}
+`otelcol.processor.resourcedetection` is a wrapper over the upstream
+OpenTelemetry Collector Contrib `resourcedetection` processor. If necessary,
+bug reports or feature requests are redirected to the upstream repository.
+{{< /admonition >}}
+
+You can specify multiple `otelcol.processor.resourcedetection` components by giving them
+different labels.
+
+## Usage
+
+```river
+otelcol.processor.resourcedetection "LABEL" {
+  output {
+    logs    = [...]
+    metrics = [...]
+    traces  = [...]
+  }
+}
+```
+
+## Arguments
+
+`otelcol.processor.resourcedetection` supports the following arguments:
+
+Name        | Type           | Description                                                                         | Default   | Required
+----------- | -------------- | ----------------------------------------------------------------------------------- |---------- | --------
+`detectors` | `list(string)` | An ordered list of named detectors used to detect resource information.             | `["env"]` | no
+`override`  | `bool`         | Configures whether existing resource attributes should be overridden or preserved.  | `true`    | no
+`timeout`   | `duration`     | Timeout by which all specified detectors must complete.                             | `"5s"`    | no
+
+`detectors` can contain the following values:
+* `env`
+* `ec2`
+* `ecs`
+* `eks`
+* `elasticbeanstalk`
+* `lambda`
+* `azure`
+* `aks`
+* `consul`
+* `docker`
+* `gcp`
+* `heroku`
+* `system`
+* `openshift`
+* `kubernetes_node`
+
+`env` is the only detector that is not configured through a River block.
+The `env` detector reads resource information from the `OTEL_RESOURCE_ATTRIBUTES` environment variable.
+This variable must be in the format `<key1>=<value1>,<key2>=<value2>,...`,
+the details of which are currently pending confirmation in the OpenTelemetry specification.
+
+If a detector other than `env` is needed, you can customize it with the relevant River block.
+For example, you can customize the `ec2` detector with the [ec2][] block.
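+
+As a minimal sketch (the `^team$` tag pattern and the `otelcol.exporter.otlp.default` consumer
+are illustrative assumptions), enabling the `ec2` detector and collecting a specific instance
+tag could look like this:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  // Detectors run in order; `env` runs first, so its attributes win on conflict.
+  detectors = ["env", "ec2"]
+
+  ec2 {
+    // Only tag keys matching these regular expressions become resource attributes.
+    tags = ["^team$"]
+  }
+
+  output {
+    metrics = [otelcol.exporter.otlp.default.input]
+    logs    = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```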
+If you omit the [ec2][] block, the defaults specified in the [ec2][] block documentation are used.
+
+If multiple detectors insert the same attribute name, the first detector to insert wins.
+For example, if you set `detectors = ["eks", "ec2"]`, then `cloud.platform` will be `aws_eks` instead of `aws_ec2`.
+
+The following order is recommended for AWS:
+ 1. [lambda][]
+ 1. [elasticbeanstalk][]
+ 1. [eks][]
+ 1. [ecs][]
+ 1. [ec2][]
+
+## Blocks
+
+The following blocks are supported inside the definition of `otelcol.processor.resourcedetection`:
+
+Hierarchy        | Block                | Description                                        | Required
+---------------- | -------------------- | -------------------------------------------------- | --------
+output           | [output][]           | Configures where to send received telemetry data.  | yes
+ec2              | [ec2][]              |                                                    | no
+ecs              | [ecs][]              |                                                    | no
+eks              | [eks][]              |                                                    | no
+elasticbeanstalk | [elasticbeanstalk][] |                                                    | no
+lambda           | [lambda][]           |                                                    | no
+azure            | [azure][]            |                                                    | no
+aks              | [aks][]              |                                                    | no
+consul           | [consul][]           |                                                    | no
+docker           | [docker][]           |                                                    | no
+gcp              | [gcp][]              |                                                    | no
+heroku           | [heroku][]           |                                                    | no
+system           | [system][]           |                                                    | no
+openshift        | [openshift][]        |                                                    | no
+kubernetes_node  | [kubernetes_node][]  |                                                    | no
+
+[output]: #output
+[ec2]: #ec2
+[ecs]: #ecs
+[eks]: #eks
+[elasticbeanstalk]: #elasticbeanstalk
+[lambda]: #lambda
+[azure]: #azure
+[aks]: #aks
+[consul]: #consul
+[docker]: #docker
+[gcp]: #gcp
+[heroku]: #heroku
+[system]: #system
+[openshift]: #openshift
+[kubernetes_node]: #kubernetes_node
+
+[res-attr-cfg]: #resource-attribute-config
+
+### output
+
+{{< docs/shared lookup="flow/reference/components/output-block.md" source="agent" version="" >}}
+
+### ec2
+
+The `ec2` block reads resource information from the [EC2 instance metadata API] using the [AWS SDK for Go][].
+
+The `ec2` block supports the following attributes:
+
+Attribute | Type           | Description                                                                  | Default | Required
+--------- | -------------- | ----------------------------------------------------------------------------- | ------- | --------
+`tags`    | `list(string)` | A list of regular expressions to match against tag keys of an EC2 instance.   | `[]`    | no
+
+If you are using a proxy server on your EC2 instance, it's important that you exempt requests for instance metadata as described in the [AWS cli user guide][].
+Failing to do so can result in proxied or missing instance data.
+
+If the instance is part of AWS ParallelCluster and the detector is failing to connect to the metadata server,
+check the iptables rules and make sure the chain `PARALLELCLUSTER_IMDS` contains a rule that allows the {{< param "PRODUCT_ROOT_NAME" >}} user to access `169.254.169.254/32`.
+
+[AWS SDK for Go]: https://docs.aws.amazon.com/sdk-for-go/api/aws/ec2metadata/
+[EC2 instance metadata API]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
+[AWS cli user guide]: https://github.com/awsdocs/aws-cli-user-guide/blob/a2393582590b64bd2a1d9978af15b350e1f9eb8e/doc_source/cli-configure-proxy.md#using-a-proxy-on-amazon-ec2-instances
+
+`tags` can be used to gather tags for the EC2 instance which {{< param "PRODUCT_ROOT_NAME" >}} is running on.
+To fetch EC2 tags, the IAM role assigned to the EC2 instance must have a policy that includes the `ec2:DescribeTags` permission.
+
+The `ec2` block supports the following blocks:
+
+Block                                             | Description                                   | Required
+--------------------------------------------------|------------------------------------------------|---------
+[resource_attributes](#ec2--resource_attributes) | Configures which resource attributes to add.   | no
+
+#### ec2 > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+--------------------------------------- | --------------------------------------------------------------------------------------------------- | --------
+[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.availability_zone][res-attr-cfg] | Toggles the `cloud.availability_zone` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.image.id][res-attr-cfg] | Toggles the `host.image.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no +[host.type][res-attr-cfg] | Toggles the `host.type` resource attribute.
Sets `enabled` to `true` by default. | no + +### ecs + +The `ecs` block queries the Task Metadata Endpoint (TMDE) to record information about the current ECS Task. Only TMDE V4 and V3 are supported. + +[Task Metadata Endpoint]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-metadata-endpoint.html + +The `ecs` block supports the following blocks: + +Block | Description | Required +-------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#ecs--resource_attributes) | Configures which resource attributes to add. | no + +#### ecs > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +--------------------------------------- | --------------------------------------------------------------------------------------------------- | -------- +[aws.ecs.cluster.arn][res-attr-cfg] | Toggles the `aws.ecs.cluster.arn` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.launchtype][res-attr-cfg] | Toggles the `aws.ecs.launchtype` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.task.arn][res-attr-cfg] | Toggles the `aws.ecs.task.arn` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.task.family][res-attr-cfg] | Toggles the `aws.ecs.task.family` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.ecs.task.revision][res-attr-cfg] | Toggles the `aws.ecs.task.revision` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.group.arns][res-attr-cfg] | Toggles the `aws.log.group.arns` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.group.names][res-attr-cfg] | Toggles the `aws.log.group.names` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.stream.arns][res-attr-cfg] | Toggles the `aws.log.stream.arns` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.stream.names][res-attr-cfg] | Toggles the `aws.log.stream.names` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.availability_zone][res-attr-cfg] | Toggles the `cloud.availability_zone` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no + +### eks + +The `eks` block adds resource attributes for Amazon EKS. + +The `eks` block supports the following blocks: + +Block | Description | Required +-------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#eks--resource_attributes) | Configures which resource attributes to add. | no + +#### eks > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +------------------------------- | ------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no + +Example values: +* `cloud.provider`: `"aws"` +* `cloud.platform`: `"aws_eks"` + +### elasticbeanstalk + +The `elasticbeanstalk` block reads the AWS X-Ray configuration file available on all Beanstalk instances with [X-Ray Enabled][]. + +[X-Ray Enabled]: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/environment-configuration-debugging.html + +The `elasticbeanstalk` block supports the following blocks: + +Block | Description | Required +--------------------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#elasticbeanstalk--resource_attributes) | Configures which resource attributes to add. | no + +#### elasticbeanstalk > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +--------------------------------- | --------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no
+[deployment.environment][res-attr-cfg] | Toggles the `deployment.environment` resource attribute. Sets `enabled` to `true` by default. | no
+[service.instance.id][res-attr-cfg] | Toggles the `service.instance.id` resource attribute. Sets `enabled` to `true` by default. | no
+[service.version][res-attr-cfg] | Toggles the `service.version` resource attribute.
Sets `enabled` to `true` by default. | no + +Example values: +* `cloud.provider`: `"aws"` +* `cloud.platform`: `"aws_elastic_beanstalk"` + +### lambda + +The `lambda` block uses the AWS Lambda [runtime environment variables][lambda-env-vars] to retrieve various resource attributes. + +[lambda-env-vars]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-envvars.html#configuration-envvars-runtime + +The `lambda` block supports the following blocks: + +Block | Description | Required +----------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#lambda--resource_attributes) | Configures which resource attributes to add. | no + +#### lambda > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +------------------------------------- | --------------------------------------------------------------------------------------------------- | -------- +[aws.log.group.names][res-attr-cfg] | Toggles the `aws.log.group.names` resource attribute.
Sets `enabled` to `true` by default. | no +[aws.log.stream.names][res-attr-cfg] | Toggles the `aws.log.stream.names` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.instance][res-attr-cfg] | Toggles the `faas.instance` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.max_memory][res-attr-cfg] | Toggles the `faas.max_memory` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.name][res-attr-cfg] | Toggles the `faas.name` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.version][res-attr-cfg] | Toggles the `faas.version` resource attribute.
Sets `enabled` to `true` by default. | no + +[Cloud semantic conventions][]: +* `cloud.provider`: `"aws"` +* `cloud.platform`: `"aws_lambda"` +* `cloud.region`: `$AWS_REGION` + +[Function as a Service semantic conventions][] and [AWS Lambda semantic conventions][]: +* `faas.name`: `$AWS_LAMBDA_FUNCTION_NAME` +* `faas.version`: `$AWS_LAMBDA_FUNCTION_VERSION` +* `faas.instance`: `$AWS_LAMBDA_LOG_STREAM_NAME` +* `faas.max_memory`: `$AWS_LAMBDA_FUNCTION_MEMORY_SIZE` + +[AWS Logs semantic conventions][]: +* `aws.log.group.names`: `$AWS_LAMBDA_LOG_GROUP_NAME` +* `aws.log.stream.names`: `$AWS_LAMBDA_LOG_STREAM_NAME` + +[Cloud semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud.md +[Function as a Service semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/faas.md +[AWS Lambda semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/instrumentation/aws-lambda.md#resource-detector +[AWS Logs semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud_provider/aws/logs.md + +### azure + +The `azure` block queries the [Azure Instance Metadata Service][] to retrieve various resource attributes. + +[Azure Instance Metadata Service]: https://aka.ms/azureimds + +The `azure` block supports the following blocks: + +Block | Description | Required +---------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#azure--resource_attributes) | Configures which resource attributes to add. | no + +#### azure > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +-----------------------------------------|------------------------------------------------------------------------------------------------------|--------- +[azure.resourcegroup.name][res-attr-cfg] | Toggles the `azure.resourcegroup.name` resource attribute.
Sets `enabled` to `true` by default. | no +[azure.vm.name][res-attr-cfg] | Toggles the `azure.vm.name` resource attribute.
Sets `enabled` to `true` by default. | no +[azure.vm.scaleset.name][res-attr-cfg] | Toggles the `azure.vm.scaleset.name` resource attribute.
Sets `enabled` to `true` by default. | no +[azure.vm.size][res-attr-cfg] | Toggles the `azure.vm.size` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no + +Example values: +* `cloud.provider`: `"azure"` +* `cloud.platform`: `"azure_vm"` + +### aks + +The `aks` block adds resource attributes related to Azure AKS. + +The `aks` block supports the following blocks: + +Block | Description | Required +-------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#aks--resource_attributes) | Configures which resource attributes to add. | no + +#### aks > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +------------------------------- | ------------------------------------------------------------------------------------------- | -------- +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no
+
+Example values:
+* `cloud.provider`: `"azure"`
+* `cloud.platform`: `"azure_aks"`
+
+### consul
+
+The `consul` block queries a Consul agent and reads its configuration endpoint to retrieve values for resource attributes.
+
+The `consul` block supports the following attributes:
+
+Attribute    | Type           | Description                                                                        | Default | Required
+-------------|----------------|-------------------------------------------------------------------------------------|---------|---------
+`address`    | `string`       | The address of the Consul server.                                                  | `""`    | no
+`datacenter` | `string`       | Datacenter to use. If not provided, the default agent datacenter is used.          | `""`    | no
+`token`      | `secret`       | A per-request ACL token which overrides the Consul agent's default (empty) token.  | `""`    | no
+`namespace`  | `string`       | The name of the namespace to send along for the request.                           | `""`    | no
+`meta`       | `list(string)` | Allowlist of [Consul Metadata][] keys to use as resource attributes.               | `[]`    | no
+
+`token` is only required if [Consul's ACL System][] is enabled.
+
+[Consul Metadata]: https://www.consul.io/docs/agent/options#node_meta
+[Consul's ACL System]: https://www.consul.io/docs/security/acl/acl-system
+
+The `consul` block supports the following blocks:
+
+Block                                                | Description                                   | Required
+-----------------------------------------------------|-------------------------------------------------|---------
+[resource_attributes](#consul--resource_attributes) | Configures which resource attributes to add.    | no
+
+#### consul > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block                        | Description                                                                                | Required
+-----------------------------|----------------------------------------------------------------------------------------------|---------
+[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
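+
+The following example is a minimal sketch of the `consul` detector; the `address` and `meta` values are illustrative placeholders rather than defaults, so replace them with your own Consul agent address and metadata keys:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["consul"]
+
+  consul {
+    address = "localhost:8500"
+    meta    = ["rack"]
+  }
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```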
+
+### docker
+
+The `docker` block queries the Docker daemon to retrieve various resource attributes from the host machine.
+
+You need to mount the Docker socket (`/var/run/docker.sock` on Linux) to contact the Docker daemon.
+Docker detection doesn't work on macOS.
+
+The `docker` block supports the following blocks:
+
+Block | Description | Required
+----------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#docker--resource_attributes) | Configures which resource attributes to add. | no
+
+#### docker > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+--------------------------|------------------------------------------------------------------------------------------|---------
+[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute. <br> Sets `enabled` to `true` by default. | no
+[os.type][res-attr-cfg] | Toggles the `os.type` resource attribute. <br> Sets `enabled` to `true` by default. | no
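+
+Because the `docker` block has no attributes of its own, a minimal sketch only lists the detector. It assumes the Docker socket is reachable, for example through a `-v /var/run/docker.sock:/var/run/docker.sock` bind mount when {{< param "PRODUCT_ROOT_NAME" >}} itself runs in a container:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["docker"]
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```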
+
+### gcp
+
+The `gcp` block detects resource attributes using the [Google Cloud Client Libraries for Go][], which read resource information from the [GCP metadata server][].
+The detector also uses environment variables to identify which GCP platform the application is running on, and assigns appropriate resource attributes for that platform.
+
+Use the `gcp` detector regardless of the GCP platform {{< param "PRODUCT_ROOT_NAME" >}} is running on.
+
+[Google Cloud Client Libraries for Go]: https://github.com/googleapis/google-cloud-go
+[GCP metadata server]: https://cloud.google.com/compute/docs/storing-retrieving-metadata
+
+The `gcp` block supports the following blocks:
+
+Block | Description | Required
+-------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#gcp--resource_attributes) | Configures which resource attributes to add. | no
+
+#### gcp > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+---------------------------------------------|----------------------------------------------------------------------------------------------------------|---------
+[cloud.account.id][res-attr-cfg] | Toggles the `cloud.account.id` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.availability_zone][res-attr-cfg] | Toggles the `cloud.availability_zone` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.id][res-attr-cfg] | Toggles the `faas.id` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.instance][res-attr-cfg] | Toggles the `faas.instance` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.name][res-attr-cfg] | Toggles the `faas.name` resource attribute.
Sets `enabled` to `true` by default. | no +[faas.version][res-attr-cfg] | Toggles the `faas.version` resource attribute.
Sets `enabled` to `true` by default. | no +[gcp.cloud_run.job.execution][res-attr-cfg] | Toggles the `gcp.cloud_run.job.execution` resource attribute.
Sets `enabled` to `true` by default. | no +[gcp.cloud_run.job.task_index][res-attr-cfg] | Toggles the `gcp.cloud_run.job.task_index` resource attribute.
Sets `enabled` to `true` by default. | no +[gcp.gce.instance.hostname][res-attr-cfg] | Toggles the `gcp.gce.instance.hostname` resource attribute.
Sets `enabled` to `false` by default. | no +[gcp.gce.instance.name][res-attr-cfg] | Toggles the `gcp.gce.instance.name` resource attribute.
Sets `enabled` to `false` by default. | no +[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute.
Sets `enabled` to `true` by default. | no +[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute.
Sets `enabled` to `true` by default. | no +[host.type][res-attr-cfg] | Toggles the `host.type` resource attribute.
Sets `enabled` to `true` by default. | no +[k8s.cluster.name][res-attr-cfg] | Toggles the `k8s.cluster.name` resource attribute.
Sets `enabled` to `true` by default. | no + +#### Google Compute Engine (GCE) metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_compute_engine"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `cloud.availability_zone`: e.g. `"us-central1-c"` +* `host.id`: instance id +* `host.name`: instance name +* `host.type`: machine type +* (optional) `gcp.gce.instance.hostname` +* (optional) `gcp.gce.instance.name` + +#### Google Kubernetes Engine (GKE) metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_kubernetes_engine"` +* `cloud.account.id`: project id +* `cloud.region`: only for regional GKE clusters; e.g. `"us-central1"` +* `cloud.availability_zone`: only for zonal GKE clusters; e.g. `"us-central1-c"` +* `k8s.cluster.name` +* `host.id`: instance id +* `host.name`: instance name; only when workload identity is disabled + +One known issue happens when GKE workload identity is enabled. The GCE metadata endpoints won't be available, +and the GKE resource detector won't be able to determine `host.name`. +If this happens, you can set `host.name` from one of the following resources: +- Get the `node.name` through the [downward API][] with the `env` detector. +- Get the Kubernetes node name from the Kubernetes API (with `k8s.io/client-go`). + +[downward API]: https://kubernetes.io/docs/concepts/workloads/pods/downward-api/ + +#### Google Cloud Run Services metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_cloud_run"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `faas.id`: instance id +* `faas.name`: service name +* `faas.version`: service revision + +#### Cloud Run Jobs metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_cloud_run"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `faas.id`: instance id +* `faas.name`: service name +* `gcp.cloud_run.job.execution`: e.g. `"my-service-ajg89"` +* `gcp.cloud_run.job.task_index`: e.g. `"0"` + +#### Google Cloud Functions metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_cloud_functions"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `faas.id`: instance id +* `faas.name`: function name +* `faas.version`: function version + +#### Google App Engine metadata + +* `cloud.provider`: `"gcp"` +* `cloud.platform`: `"gcp_app_engine"` +* `cloud.account.id`: project id +* `cloud.region`: e.g. `"us-central1"` +* `cloud.availability_zone`: e.g. `"us-central1-c"` +* `faas.id`: instance id +* `faas.name`: service name +* `faas.version`: service version + +### heroku + +The `heroku` block adds resource attributes derived from [Heroku dyno metadata][]. + +The `heroku` block supports the following blocks: + +Block | Description | Required +----------------------------------------------------|----------------------------------------------|--------- +[resource_attributes](#heroku--resource_attributes) | Configures which resource attributes to add. | no + +#### heroku > resource_attributes + +The `resource_attributes` block supports the following blocks: + +Block | Description | Required +--------------------------------------------------|---------------------------------------------------------------------------------------------------------------|--------- +[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.app.id][res-attr-cfg] | Toggles the `heroku.app.id` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.dyno.id][res-attr-cfg] | Toggles the `heroku.dyno.id` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.release.commit][res-attr-cfg] | Toggles the `heroku.release.commit` resource attribute.
Sets `enabled` to `true` by default. | no +[heroku.release.creation_timestamp][res-attr-cfg] | Toggles the `heroku.release.creation_timestamp` resource attribute.
Sets `enabled` to `true` by default. | no +[service.instance.id][res-attr-cfg] | Toggles the `service.instance.id` resource attribute.
Sets `enabled` to `true` by default. | no +[service.name][res-attr-cfg] | Toggles the `service.name` resource attribute.
Sets `enabled` to `true` by default. | no +[service.version][res-attr-cfg] | Toggles the `service.version` resource attribute.
Sets `enabled` to `true` by default. | no
+
+When [Heroku dyno metadata][] is active, Heroku applications publish information through environment variables.
+We map these environment variables to resource attributes as follows:
+
+| Dyno metadata environment variable | Resource attribute |
+|------------------------------------|-------------------------------------|
+| `HEROKU_APP_ID` | `heroku.app.id` |
+| `HEROKU_APP_NAME` | `service.name` |
+| `HEROKU_DYNO_ID` | `service.instance.id` |
+| `HEROKU_RELEASE_CREATED_AT` | `heroku.release.creation_timestamp` |
+| `HEROKU_RELEASE_VERSION` | `service.version` |
+| `HEROKU_SLUG_COMMIT` | `heroku.release.commit` |
+
+For more information, see the [Heroku cloud provider documentation][] under the [OpenTelemetry specification semantic conventions][].
+
+[Heroku dyno metadata]: https://devcenter.heroku.com/articles/dyno-metadata
+[Heroku cloud provider documentation]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/cloud_provider/heroku.md
+[OpenTelemetry specification semantic conventions]: https://github.com/open-telemetry/opentelemetry-specification
+
+### system
+
+The `system` block queries the host machine to retrieve various resource attributes.
+
+{{< admonition type="note" >}}
+Use the [Docker](#docker) detector if running {{< param "PRODUCT_ROOT_NAME" >}} as a Docker container.
+{{< /admonition >}}
+
+The `system` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+------------------ | --------------- | --------------------------------------------------------------------------- |---------------- | --------
+`hostname_sources` | `list(string)` | A priority list of sources used to fetch the hostname. | `["dns", "os"]` | no
+
+The valid options for `hostname_sources` are:
+* `"dns"`: Uses multiple sources to get the fully qualified domain name.
+First, it looks up the hostname in the local machine's `hosts` file. If that fails, it looks up the CNAME.
+Finally, if that fails, it does a reverse DNS query. This hostname source may produce unreliable results on Windows;
+to produce a FQDN, Windows hosts might get better results from the `"lookup"` hostname source described below.
+* `"os"`: Uses the hostname provided by the local machine's kernel.
+* `"cname"`: Uses the canonical name, as provided by `net.LookupCNAME` in the Go standard library.
+This hostname source may produce unreliable results on Windows.
+* `"lookup"`: Does a reverse DNS lookup of the current host's IP address.
+
+If fetching the hostname from one source fails, the next source in the `hostname_sources` list is tried.
+
+The `system` block supports the following blocks:
+
+Block | Description | Required
+----------------------------------------------------|----------------------------------------------|---------
+[resource_attributes](#system--resource_attributes) | Configures which resource attributes to add. | no
+
+#### system > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+---------------------------------------|-------------------------------------------------------------------------------------------------------|---------
+[host.arch][res-attr-cfg] | Toggles the `host.arch` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.cpu.cache.l2.size][res-attr-cfg] | Toggles the `host.cpu.cache.l2.size` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.cpu.family][res-attr-cfg] | Toggles the `host.cpu.family` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.cpu.model.id][res-attr-cfg] | Toggles the `host.cpu.model.id` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.cpu.model.name][res-attr-cfg] | Toggles the `host.cpu.model.name` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.cpu.stepping][res-attr-cfg] | Toggles the `host.cpu.stepping` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.cpu.vendor.id][res-attr-cfg] | Toggles the `host.cpu.vendor.id` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.id][res-attr-cfg] | Toggles the `host.id` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[host.name][res-attr-cfg] | Toggles the `host.name` resource attribute. <br> Sets `enabled` to `true` by default. | no
+[os.description][res-attr-cfg] | Toggles the `os.description` resource attribute. <br> Sets `enabled` to `false` by default. | no
+[os.type][res-attr-cfg] | Toggles the `os.type` resource attribute. <br> Sets `enabled` to `true` by default. | no
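+
+As a sketch, the following configuration restricts hostname detection to the kernel-provided name and additionally enables the `host.id` attribute; both choices are illustrative rather than recommended defaults:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["system"]
+
+  system {
+    hostname_sources = ["os"]
+
+    resource_attributes {
+      host.id { enabled = true }
+    }
+  }
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```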
+
+### openshift
+
+The `openshift` block queries the OpenShift and Kubernetes APIs to retrieve various resource attributes.
+
+The `openshift` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+---------- |---------- | ------------------------------------------------------------- |-------------| --------
+`address` | `string` | Address of the OpenShift API server. | _See below_ | no
+`token` | `string` | Token used to authenticate against the OpenShift API server. | `""` | no
+
+The "get", "watch", and "list" permissions are required:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: grafana-agent
+rules:
+- apiGroups: ["config.openshift.io"]
+  resources: ["infrastructures", "infrastructures/status"]
+  verbs: ["get", "watch", "list"]
+```
+
+By default, the API address is determined from the `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment variables,
+and the service token is read from `/var/run/secrets/kubernetes.io/serviceaccount/token`.
+If TLS is not explicitly disabled and no `ca_file` is configured, `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` is used.
+Automatic detection of the API address, `ca_file`, and service token is skipped for any of these values that are set explicitly in the configuration.
+
+The `openshift` block supports the following blocks:
+
+Block | Description | Required
+------------------------------------------------------- | -------------------------------------------------------- | --------
+[resource_attributes](#openshift--resource_attributes) | Configures which resource attributes to add. | no
+[tls](#openshift--tls) | TLS settings for the connection with the OpenShift API. | yes
+
+#### openshift > tls
+
+The `tls` block configures TLS settings used for the connection to the OpenShift API server.
+
+{{< docs/shared lookup="flow/reference/components/otelcol-tls-config-block.md" source="agent" version="" >}}
+
+#### openshift > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+--------------------------------- | ------------------------------------------------------------------------------------------------ | --------
+[cloud.platform][res-attr-cfg] | Toggles the `cloud.platform` resource attribute. <br> Sets `enabled` to `true` by default. | no
+[cloud.provider][res-attr-cfg] | Toggles the `cloud.provider` resource attribute. <br> Sets `enabled` to `true` by default. | no
+[cloud.region][res-attr-cfg] | Toggles the `cloud.region` resource attribute. <br> Sets `enabled` to `true` by default. | no
+[k8s.cluster.name][res-attr-cfg] | Toggles the `k8s.cluster.name` resource attribute. <br> Sets `enabled` to `true` by default. | no
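+
+The following sketch relies on the in-cluster defaults for `address` and `token` described above and sets only the required `tls` block; the `ca_file` path shown simply mirrors the default service account certificate location:
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["openshift"]
+
+  openshift {
+    tls {
+      ca_file = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+    }
+  }
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```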
+
+### kubernetes_node
+
+The `kubernetes_node` block queries the Kubernetes API server to retrieve various node resource attributes.
+
+The `kubernetes_node` block supports the following attributes:
+
+Attribute | Type | Description | Default | Required
+------------------- |--------- | ------------------------------------------------------------------------- |------------------ | --------
+`auth_type` | `string` | Configures how to authenticate to the Kubernetes API server. | `"none"` | no
+`context` | `string` | Override the current context when `auth_type` is set to `"kubeConfig"`. | `""` | no
+`node_from_env_var` | `string` | The name of an environment variable from which to retrieve the node name. | `"K8S_NODE_NAME"` | no
+
+The "get" and "list" permissions are required:
+
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: grafana-agent
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list"]
+```
+
+`auth_type` can be set to one of the following:
+* `none`: No authentication.
+* `serviceAccount`: Use the standard service account token provided to the {{< param "PRODUCT_ROOT_NAME" >}} pod.
+* `kubeConfig`: Use credentials from `~/.kube/config`.
+
+The `kubernetes_node` block supports the following blocks:
+
+Block | Description | Required
+------------------------------------------------------------- | ---------------------------------------------- | --------
+[resource_attributes](#kubernetes_node--resource_attributes) | Configures which resource attributes to add. | no
+
+#### kubernetes_node > resource_attributes
+
+The `resource_attributes` block supports the following blocks:
+
+Block | Description | Required
+------------------------------ | ------------------------------------------------------------------------------------------ | --------
+[k8s.node.name][res-attr-cfg] | Toggles the `k8s.node.name` resource attribute.
Sets `enabled` to `true` by default. | no +[k8s.node.uid][res-attr-cfg] | Toggles the `k8s.node.uid` resource attribute.
Sets `enabled` to `true` by default. | no
+
+## Common configuration
+
+### Resource attribute config
+
+This section describes how to configure resource attribute blocks such as `k8s.node.name` and `azure.vm.name`.
+Every block is configured using the same set of attributes.
+Only the default values for those attributes might differ across resource attributes.
+For example, some resource attributes have `enabled` set to `true` by default, whereas others don't.
+
+The following attributes are supported:
+
+Attribute | Type | Description | Default | Required
+--------- | ------- | ----------------------------------------------------------------------------------- |------------- | --------
+`enabled` | `bool` | Toggles whether to add the resource attribute to the span, log, or metric resource. | _See below_ | no
+
+To see the default value for `enabled`, refer to the tables in the sections above which list the resource attributes blocks.
+The "Description" column will state either...
+
+> Sets `enabled` to `true` by default.
+
+... or:
+
+> Sets `enabled` to `false` by default.
+
+## Exported fields
+
+The following fields are exported and can be referenced by other components:
+
+Name | Type | Description
+---- | ---- | -----------
+`input` | `otelcol.Consumer` | A value that other components can use to send telemetry data to.
+
+`input` accepts OTLP-formatted data for any of the following telemetry signals:
+* logs
+* metrics
+* traces
+
+## Component health
+
+`otelcol.processor.resourcedetection` is only reported as unhealthy if given an invalid
+configuration.
+
+## Debug information
+
+`otelcol.processor.resourcedetection` doesn't expose any component-specific debug
+information.
+
+## Examples
+
+### env detector
+
+If you set up an `OTEL_RESOURCE_ATTRIBUTES` environment variable with a value of `TestKey=TestValue`,
+then all logs, metrics, and traces have a resource attribute with a key of `TestKey` and a value of `TestValue`.
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["env"]
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+
+### env and ec2
+
+You don't need to include an `ec2 {}` River block.
+The `ec2` defaults are applied automatically, as specified in [ec2][].
+
+```river
+otelcol.processor.resourcedetection "default" {
+  detectors = ["env", "ec2"]
+
+  output {
+    logs    = [otelcol.exporter.otlp.default.input]
+    metrics = [otelcol.exporter.otlp.default.input]
+    traces  = [otelcol.exporter.otlp.default.input]
+  }
+}
+```
+
+### ec2 with default resource attributes
+
+You don't need to include an `ec2 {}` River block.
+The `ec2` defaults are applied automatically, as specified in [ec2][].
+ +```river +otelcol.processor.resourcedetection "default" { + detectors = ["ec2"] + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +### ec2 with explicit resource attributes + +```river +otelcol.processor.resourcedetection "default" { + detectors = ["ec2"] + ec2 { + tags = ["^tag1$", "^tag2$", "^label.*$"] + resource_attributes { + cloud.account.id { enabled = true } + cloud.availability_zone { enabled = true } + cloud.platform { enabled = true } + cloud.provider { enabled = true } + cloud.region { enabled = true } + host.id { enabled = true } + host.image.id { enabled = false } + host.name { enabled = false } + host.type { enabled = false } + } + } + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +### kubernetes_node + +This example uses the default `node_from_env_var` option of `K8S_NODE_NAME`. + +There is no need to put in a `kubernetes_node {}` River block. +The `kubernetes_node` defaults are applied automatically, as specified in [kubernetes_node][]. + +```river +otelcol.processor.resourcedetection "default" { + detectors = ["kubernetes_node"] + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +You need to add this to your workload: + +```yaml + env: + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +``` + +### kubernetes_node with a custom environment variable + +This example uses a custom `node_from_env_var` set to `my_custom_var`. + +```river +otelcol.processor.resourcedetection "default" { + detectors = ["kubernetes_node"] + kubernetes_node { + node_from_env_var = "my_custom_var" + } + + output { + logs = [otelcol.exporter.otlp.default.input] + metrics = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} +``` + +You need to add this to your workload: + +```yaml + env: + - name: my_custom_var + valueFrom: + fieldRef: + fieldPath: spec.nodeName +``` + + +## Compatible components + +`otelcol.processor.resourcedetection` can accept arguments from the following components: + +- Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) + +`otelcol.processor.resourcedetection` has exports that can be consumed by the following components: + +- Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) + +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} + + \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.span.md b/docs/sources/flow/reference/components/otelcol.processor.span.md index fe6985881007..ac909575cb1a 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.span.md +++ b/docs/sources/flow/reference/components/otelcol.processor.span.md @@ -400,11 +400,9 @@ otelcol.processor.span "default" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md index b6c6ccfdc0f7..cb651d67e4f0 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md +++ b/docs/sources/flow/reference/components/otelcol.processor.tail_sampling.md @@ -565,11 +565,9 @@ otelcol.exporter.otlp "production" { - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.processor.transform.md b/docs/sources/flow/reference/components/otelcol.processor.transform.md index 81967bb11c24..9a70c07e9509 100644 --- a/docs/sources/flow/reference/components/otelcol.processor.transform.md +++ b/docs/sources/flow/reference/components/otelcol.processor.transform.md @@ -42,7 +42,7 @@ there is also a set of metrics-only functions: * `end_time_unix_nano - start_time_unix_nano` * `sum([1, 2, 3, 4]) + (10 / 1) - 1` -{{% admonition type="note" %}} +{{< admonition type="note" >}} There are two ways of inputting strings in River configuration files: * Using quotation marks ([normal River strings][river-strings]). Characters such as `\` and `"` must be escaped by preceding them with a `\` character. @@ -57,17 +57,17 @@ Raw strings are generally more convenient for writing OTTL statements. [river-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#strings" >}} [river-raw-strings]: {{< relref "../../concepts/config-language/expressions/types_and_values.md/#raw-strings" >}} -{{% /admonition %}} +{{< /admonition >}} -{{% admonition type="note" %}} +{{< admonition type="note" >}} `otelcol.processor.transform` is a wrapper over the upstream OpenTelemetry Collector `transform` processor. 
If necessary, bug reports or feature requests will be redirected to the upstream repository. -{{% /admonition %}} +{{< /admonition >}} You can specify multiple `otelcol.processor.transform` components by giving them different labels. -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} `otelcol.processor.transform` allows you to modify all aspects of your telemetry. Some specific risks are given below, but this is not an exhaustive list. It is important to understand your data before using this processor. @@ -88,7 +88,7 @@ to a new metric data type or can be used to create new metrics. [Orphaned Telemetry]: https://github.com/open-telemetry/opentelemetry-collector/blob/{{< param "OTEL_VERSION" >}}/docs/standard-warnings.md#orphaned-telemetry [no-op]: https://en.wikipedia.org/wiki/NOP_(code) [metrics data model]: https://github.com/open-telemetry/opentelemetry-specification/blob/main//specification/metrics/data-model.md -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -602,11 +602,9 @@ each `"` with a `\"`, and each `\` with a `\\` inside a [normal][river-strings] - Components that consume [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md b/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md index c19bb03dba77..4f584319fb6c 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.jaeger.md @@ -287,11 +287,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md index 28588420609d..abb89ef82fb3 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.kafka.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.kafka.md @@ -339,11 +339,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.loki.md b/docs/sources/flow/reference/components/otelcol.receiver.loki.md index 31d9877da882..c06b82cbe3dc 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.loki.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.loki.md @@ -112,11 +112,9 @@ otelcol.exporter.otlp "default" { - Components that consume [Loki `LogsReceiver`]({{< relref "../compatibility/#loki-logsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md b/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md index a6d7a5bb3ae3..ac694d890712 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.opencensus.md @@ -219,11 +219,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md index 134098ed2de4..862562508afd 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.otlp.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.otlp.md @@ -257,11 +257,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md b/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md index d0723aad80c4..7611b0955a4b 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.prometheus.md @@ -111,11 +111,9 @@ otelcol.exporter.otlp "default" { - Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md index 11e6a0485e09..54891a882da4 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.vcenter.md @@ -230,11 +230,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md b/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md index 2dd3d8a9ccfb..5d6c903036d1 100644 --- a/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md +++ b/docs/sources/flow/reference/components/otelcol.receiver.zipkin.md @@ -152,11 +152,9 @@ otelcol.exporter.otlp "default" { - Components that export [OpenTelemetry `otelcol.Consumer`]({{< relref "../compatibility/#opentelemetry-otelcolconsumer-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.exporter.agent.md b/docs/sources/flow/reference/components/prometheus.exporter.agent.md index cb2dd5cda361..a4575bb08c1b 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.agent.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.agent.md @@ -8,7 +8,8 @@ title: prometheus.exporter.agent --- # prometheus.exporter.agent -The `prometheus.exporter.agent` component collects and exposes metrics about the agent itself. + +The `prometheus.exporter.agent` component collects and exposes metrics about {{< param "PRODUCT_NAME" >}} itself. ## Usage @@ -18,6 +19,7 @@ prometheus.exporter.agent "agent" { ``` ## Arguments + `prometheus.exporter.agent` accepts no arguments. ## Exported fields @@ -31,12 +33,12 @@ an invalid configuration. ## Debug information -`prometheus.exporter.agent` does not expose any component-specific +`prometheus.exporter.agent` doesn't expose any component-specific debug information. ## Debug metrics -`prometheus.exporter.agent` does not expose any component-specific +`prometheus.exporter.agent` doesn't expose any component-specific debug metrics. ## Example @@ -80,11 +82,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.apache.md b/docs/sources/flow/reference/components/prometheus.exporter.apache.md index 08f19fa2d1d9..d3f786083b37 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.apache.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.apache.md @@ -96,11 +96,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.azure.md b/docs/sources/flow/reference/components/prometheus.exporter.azure.md index ea8fa08cd912..1835e5e24745 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.azure.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.azure.md @@ -180,11 +180,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. 
- -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md b/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md index 23f334b2f1a6..fb2a2653e983 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.blackbox.md @@ -204,11 +204,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md b/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md index 02c923ebe898..b6cdf1f98e21 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.cadvisor.md @@ -135,11 +135,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md index 2c1682a5fccc..0aad4bd0d8e7 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md @@ -147,9 +147,9 @@ You can use the following blocks in`prometheus.exporter.cloudwatch` to configure | static > metric | [metric][] | Configures the list of metrics the job should scrape. Multiple metrics can be defined inside one job. | yes | | decoupled_scraping | [decoupled_scraping][] | Configures the decoupled scraping feature to retrieve metrics on a schedule and return the cached metrics. | no | -{{% admonition type="note" %}} +{{< admonition type="note" >}} The `static` and `discovery` blocks are marked as not required, but you must configure at least one static or discovery job. 
-{{% /admonition %}} +{{< /admonition >}} [discovery]: #discovery-block [static]: #static-block @@ -463,11 +463,9 @@ discovery job, the `type` field of each `discovery_job` must match either the de - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.consul.md b/docs/sources/flow/reference/components/prometheus.exporter.consul.md index 81185047459e..6a38931ad0d0 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.consul.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.consul.md @@ -106,11 +106,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md b/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md index 2f22e0048807..bf60a1fee166 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.dnsmasq.md @@ -96,11 +96,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md b/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md index 6feb9c683eeb..f7150a3d41b4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.elasticsearch.md @@ -15,10 +15,10 @@ The `prometheus.exporter.elasticsearch` component embeds [elasticsearch_exporter](https://github.com/prometheus-community/elasticsearch_exporter) for the collection of metrics from ElasticSearch servers. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Currently, an Agent can only collect metrics from a single ElasticSearch server. However, the exporter can collect the metrics from all nodes through that server configured. 
-{{% /admonition %}} +{{< /admonition >}} We strongly recommend that you configure a separate user for the Agent, and give it only the strictly mandatory security privileges necessary for monitoring your node, as per the [official documentation](https://github.com/prometheus-community/elasticsearch_exporter#elasticsearch-7x-security-privileges). @@ -139,11 +139,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md b/docs/sources/flow/reference/components/prometheus.exporter.gcp.md index e9a3d7ab2786..b7ff3158c372 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.gcp.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.gcp.md @@ -59,9 +59,9 @@ prometheus.exporter.gcp "pubsub" { You can use the following arguments to configure the exporter's behavior. Omitted fields take their default values. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Please note that if you are supplying a list of strings for the `extra_filters` argument, any string values within a particular filter string must be enclosed in escaped double quotes. For example, `loadbalancing.googleapis.com:resource.labels.backend_target_name="sample-value"` must be encoded as `"loadbalancing.googleapis.com:resource.labels.backend_target_name=\"sample-value\""` in the River config. -{{% /admonition %}} +{{< /admonition >}} | Name | Type | Description | Default | Required | | ------------------------- | -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | -------- | @@ -182,11 +182,9 @@ prometheus.exporter.gcp "lb_subset_with_filter" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.github.md b/docs/sources/flow/reference/components/prometheus.exporter.github.md index 753458562ab5..662617299da4 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.github.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.github.md @@ -104,11 +104,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md b/docs/sources/flow/reference/components/prometheus.exporter.kafka.md index 59400eea67fe..1de06212f557 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.kafka.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.kafka.md @@ -116,11 +116,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md b/docs/sources/flow/reference/components/prometheus.exporter.memcached.md index bd158d76a996..7e9cc9a53d87 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.memcached.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.memcached.md @@ -108,11 +108,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md b/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md index 1aa855542c06..4301eee4f4d2 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mongodb.md @@ -13,9 +13,9 @@ title: prometheus.exporter.mongodb The `prometheus.exporter.mongodb` component embeds percona's [`mongodb_exporter`](https://github.com/percona/mongodb_exporter). 
-{{% admonition type="note" %}} +{{< admonition type="note" >}} This exporter doesn't collect metrics from multiple nodes. For this integration to work properly, you must have connect each node of your MongoDB cluster to a {{< param "PRODUCT_NAME" >}} instance. -{{% /admonition %}} +{{< /admonition >}} We strongly recommend configuring a separate user for {{< param "PRODUCT_NAME" >}}, giving it only the strictly mandatory security privileges necessary for monitoring your node. Refer to the [Percona documentation](https://github.com/percona/mongodb_exporter#permissions) for more information. @@ -97,11 +97,9 @@ prometheus.remote_write "default" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md index e2bcad76830e..6db00954f332 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md @@ -339,11 +339,9 @@ queries: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md b/docs/sources/flow/reference/components/prometheus.exporter.mysql.md index 7c0cb90ae69f..edc1c1a5a49f 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mysql.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mysql.md @@ -221,11 +221,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md b/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md index 10712ba290d5..4053acc074b0 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.oracledb.md @@ -109,11 +109,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md index 39cfd8770108..f50e9fd77709 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.postgres.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.postgres.md @@ -222,11 +222,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.process.md b/docs/sources/flow/reference/components/prometheus.exporter.process.md index ddd315f28797..da135994fd7b 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.process.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.process.md @@ -142,11 +142,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.redis.md b/docs/sources/flow/reference/components/prometheus.exporter.redis.md index cebbbdd02906..ccb114ea8db5 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.redis.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.redis.md @@ -140,11 +140,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md b/docs/sources/flow/reference/components/prometheus.exporter.snmp.md index 1e69da7fb941..5bd05efed907 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snmp.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.snmp.md @@ -14,9 +14,9 @@ title: prometheus.exporter.snmp The `prometheus.exporter.snmp` component embeds [`snmp_exporter`](https://github.com/prometheus/snmp_exporter). `snmp_exporter` lets you collect SNMP data and expose them as Prometheus metrics. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `prometheus.exporter.snmp` uses the latest configuration introduced in version 0.23 of the Prometheus `snmp_exporter`. -{{% /admonition %}} +{{< /admonition >}} ## Usage @@ -40,7 +40,8 @@ Omitted fields take their default values. | `config_file` | `string` | SNMP configuration file defining custom modules. | | no | | `config` | `string` or `secret` | SNMP configuration as inline string. | | no | -The `config_file` argument points to a YAML file defining which snmp_exporter modules to use. See [snmp_exporter](https://github.com/prometheus/snmp_exporter#generating-configuration) for details on how to generate a config file. +The `config_file` argument points to a YAML file defining which snmp_exporter modules to use. +Refer to [snmp_exporter](https://github.com/prometheus/snmp_exporter#generating-configuration) for details on how to generate a configuration file. The `config` argument must be a YAML document as string defining which SNMP modules and auths to use. `config` is typically loaded by using the exports of another component. For example, @@ -207,11 +208,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md b/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md index f384fd1a6805..9211f9424cbe 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.snowflake.md @@ -110,11 +110,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.squid.md b/docs/sources/flow/reference/components/prometheus.exporter.squid.md index 49a8639c129d..957297d4af4e 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.squid.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.squid.md @@ -102,11 +102,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md b/docs/sources/flow/reference/components/prometheus.exporter.statsd.md index 2e00b8db35b0..d7b2e7fc48df 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.statsd.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.statsd.md @@ -135,11 +135,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.unix.md b/docs/sources/flow/reference/components/prometheus.exporter.unix.md index ab2d88c8175e..7f3f4ca935cf 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.unix.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.unix.md @@ -418,11 +418,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md b/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md index 61c951e9c71d..499805179f11 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.vsphere.md @@ -98,11 +98,9 @@ prometheus.remote_write "default" { - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.exporter.windows.md b/docs/sources/flow/reference/components/prometheus.exporter.windows.md index 8042b5458d1c..14e22d13d2b7 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.windows.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.windows.md @@ -14,12 +14,13 @@ The `prometheus.exporter.windows` component embeds [windows_exporter](https://github.com/prometheus-community/windows_exporter) which exposes a wide variety of hardware and OS metrics for Windows-based systems. -The `windows_exporter` itself comprises various _collectors_, which can be -enabled and disabled at will. For more information on collectors, refer to the -[`collectors-list`](#collectors-list) section. +The `windows_exporter` itself comprises various _collectors_, which you can enable and disable as needed. +For more information on collectors, refer to the [`collectors-list`](#collectors-list) section. -**Note** The black and white list config options are available for backwards compatibility but are deprecated. The include -and exclude config options are preferred going forward. +{{< admonition type="note" >}} +The black and white list configuration options are available for backwards compatibility but are deprecated. +The include and exclude configuration options are preferred going forward. +{{< /admonition >}} ## Usage @@ -29,17 +30,18 @@ prometheus.exporter.windows "LABEL" { ``` ## Arguments + The following arguments can be used to configure the exporter's behavior. All arguments are optional. 
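As a sketch of how these arguments combine (the collector list and timeout are illustrative assumptions, not recommendations):

```river
prometheus.exporter.windows "example" {
  // Only the collectors named here run; any collector omitted from
  // this list is disabled.
  enabled_collectors = ["cpu", "logical_disk", "net", "os", "service"]

  // Assumed value: tighten the collection timeout from the 4m default.
  timeout = "2m"
}
```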
Omitted fields take their default values. -| Name | Type | Description | Default | Required | -|----------------------|------------------|-------------------------------------------|---------|----------| -| `enabled_collectors` | `list(string)` | List of collectors to enable. | `["cpu","cs","logical_disk","net","os","service","system"]` | no | -| `timeout` | `duration` | Configure timeout for collecting metrics. | `4m` | no | +| Name | Type | Description | Default | Required | +|----------------------|----------------|-------------------------------------------|-------------------------------------------------------------|----------| +| `enabled_collectors` | `list(string)` | List of collectors to enable. | `["cpu","cs","logical_disk","net","os","service","system"]` | no | +| `timeout` | `duration` | Configure timeout for collecting metrics. | `4m` | no | -`enabled_collectors` defines a hand-picked list of enabled-by-default -collectors. If set, anything not provided in that list is disabled by -default. See the [Collectors list](#collectors-list) for the default set. +`enabled_collectors` defines a hand-picked list of enabled-by-default collectors. +If set, anything not provided in that list is disabled by default. +Refer to the [Collectors list](#collectors-list) for the default set. ## Blocks @@ -75,15 +77,17 @@ text_file | [text_file][] | Configures the text_file collector. | [text_file]: #textfile-block ### dfsr block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- + +Name | Type | Description | Default | Required +-----------------|----------------|------------------------------------------------------|------------------------------------|--------- `source_enabled` | `list(string)` | Comma-separated list of DFSR Perflib sources to use. | `["connection","folder","volume"]` | no ### exchange block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`enabled_list` | `string` | Comma-separated list of collectors to use. | `""` | no + +Name | Type | Description | Default | Required +---------------|----------|--------------------------------------------|---------|--------- +`enabled_list` | `string` | Comma-separated list of collectors to use. | `""` | no The collectors specified by `enabled_list` can include the following: @@ -101,86 +105,96 @@ For example, `enabled_list` may be set to `"AvailabilityService,OutlookWebAccess ### iis block -Name | Type | Description | Default | Required ----- |----------| ----------- | ------- | -------- -`app_exclude` | `string` | Regular expression of applications to ignore. | `""` | no -`app_include` | `string` | Regular expression of applications to report on. | `".*"` | no -`site_exclude` | `string` | Regular expression of sites to ignore. | `""` | no -`site_include` | `string` | Regular expression of sites to report on. | `".*"` | no + +Name | Type | Description | Default | Required +---------------|----------|--------------------------------------------------|---------|--------- +`app_exclude` | `string` | Regular expression of applications to ignore. | `""` | no +`app_include` | `string` | Regular expression of applications to report on. | `".*"` | no +`site_exclude` | `string` | Regular expression of sites to ignore. | `""` | no +`site_include` | `string` | Regular expression of sites to report on. 
| `".*"` | no

### logical_disk block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
-`exclude` | `string` | Regular expression of volumes to exclude. | `""` | no
-`include` | `string` | Regular expression of volumes to include. | `".+"` | no
+
+Name | Type | Description | Default | Required
+----------|----------|-------------------------------------------|---------|---------
+`exclude` | `string` | Regular expression of volumes to exclude. | `""` | no
+`include` | `string` | Regular expression of volumes to include. | `".+"` | no

Volume names must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude` to be included.

### msmq block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
-`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no
+
+Name | Type | Description | Default | Required
+---------------|----------|-------------------------------------------------|---------|---------
+`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no

Specifying `where_clause` is useful to limit the response to the MSMQs you specify, reducing the size of the response.

### mssql block
+
Name | Type | Description | Default | Required
---- |----------| ----------- | ------- | --------
`enabled_classes` | `list(string)` | Comma-separated list of MSSQL WMI classes to use. | `["accessmethods", "availreplica", "bufman", "databases", "dbreplica", "genstats", "locks", "memmgr", "sqlstats", "sqlerrorstransactions"]` | no

### network block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
-`exclude` | `string` | Regular expression of NIC:s to exclude. | `""` | no
-`include` | `string` | Regular expression of NIC:s to include. | `".*"` | no
+
+Name | Type | Description | Default | Required
+----------|----------|-----------------------------------------|---------|---------
+`exclude` | `string` | Regular expression of NICs to exclude. | `""` | no
+`include` | `string` | Regular expression of NICs to include. | `".*"` | no

NIC names must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude` to be included.

### process block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
-`exclude` | `string` | Regular expression of processes to exclude. | `""` | no
-`include` | `string` | Regular expression of processes to include. | `".*"` | no
+
+Name | Type | Description | Default | Required
+----------|----------|---------------------------------------------|---------|---------
+`exclude` | `string` | Regular expression of processes to exclude. | `""` | no
+`include` | `string` | Regular expression of processes to include. | `".*"` | no

Processes must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude` to be included.

### scheduled_task block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
-`exclude` | `string` | Regexp of tasks to exclude. | `""` | no
-`include` | `string` | Regexp of tasks to include. | `".+"` | no
+
+Name | Type | Description | Default | Required
+----------|----------|-----------------------------|---------|---------
+`exclude` | `string` | Regexp of tasks to exclude. | `""` | no
+`include` | `string` | Regexp of tasks to include. | `".+"` | no

For a task name to be included, it must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude`.

### service block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
-`use_api` | `string` | Use API calls to collect service data instead of WMI. | `false` | no
-`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no
+
+Name | Type | Description | Default | Required
+---------------|----------|-------------------------------------------------------|---------|---------
+`use_api` | `string` | Use API calls to collect service data instead of WMI. | `false` | no
+`where_clause` | `string` | WQL 'where' clause to use in WMI metrics query. | `""` | no

The `where_clause` argument can be used to limit the response to the services you specify, reducing the size of the response.
If `use_api` is enabled, `where_clause` won't be effective.

### smtp block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
-`exclude` | `string` | Regexp of virtual servers to ignore. | | no
-`include` | `string` | Regexp of virtual servers to include. | `".+"` | no
+
+Name | Type | Description | Default | Required
+----------|----------|---------------------------------------|---------|---------
+`exclude` | `string` | Regexp of virtual servers to ignore. | | no
+`include` | `string` | Regexp of virtual servers to include. | `".+"` | no

For a server name to be included, it must match the regular expression specified by `include` and must _not_ match the regular expression specified by `exclude`.

### text_file block

-Name | Type | Description | Default | Required
----- |----------| ----------- | ------- | --------
+
+Name | Type | Description | Default | Required
+----------------------|----------|----------------------------------------------------|-------------------------------------------------------|---------
`text_file_directory` | `string` | The directory containing the files to be ingested. | `C:\Program Files\Grafana Agent Flow\textfile_inputs` | no

When `text_file_directory` is set, only files with the extension `.prom` inside the specified directory are read. Each `.prom` file found must end with an empty line feed to work properly.

@@ -270,12 +284,12 @@ Name | Description | Enabled by default
[vmware_blast](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.vmware_blast.md) | VMware Blast session metrics |
[vmware](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.vmware.md) | Performance counters installed by the VMware Guest agent |

-See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.
+Refer to the linked documentation on each collector for more information on reported metrics, configuration settings, and usage examples.

-{{% admonition type="caution" %}}
-Certain collectors will cause {{< param "PRODUCT_ROOT_NAME" >}} to crash if those collectors are used and the required infrastructure is not installed.
-These include but are not limited to mscluster_*, vmware, nps, dns, msmq, teradici_pcoip, ad, hyperv, and scheduled_task.
-{{% /admonition %}} +{{< admonition type="caution" >}} +Certain collectors will cause {{< param "PRODUCT_ROOT_NAME" >}} to crash if those collectors are used and the required infrastructure isn't installed. +These include but aren't limited to mscluster_*, vmware, nps, dns, msmq, teradici_pcoip, ad, hyperv, and scheduled_task. +{{< /admonition >}} ## Example @@ -317,11 +331,9 @@ Replace the following: - Components that consume [Targets]({{< relref "../compatibility/#targets-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md b/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md index fa324640d0ee..b8ef773567ca 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md +++ b/docs/sources/flow/reference/components/prometheus.operator.podmonitors.md @@ -265,11 +265,9 @@ prometheus.operator.podmonitors "pods" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.operator.probes.md b/docs/sources/flow/reference/components/prometheus.operator.probes.md index 256634a88438..c8fddb96e1dd 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.probes.md +++ b/docs/sources/flow/reference/components/prometheus.operator.probes.md @@ -267,11 +267,9 @@ prometheus.operator.probes "probes" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md index 8b2e0ce29cdf..29a6414a6339 100644 --- a/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md +++ b/docs/sources/flow/reference/components/prometheus.operator.servicemonitors.md @@ -267,11 +267,9 @@ prometheus.operator.servicemonitors "services" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.receive_http.md b/docs/sources/flow/reference/components/prometheus.receive_http.md index d48985cc3f18..38d43cef5067 100644 --- a/docs/sources/flow/reference/components/prometheus.receive_http.md +++ b/docs/sources/flow/reference/components/prometheus.receive_http.md @@ -138,11 +138,9 @@ prometheus.remote_write "local" { - Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/prometheus.relabel.md b/docs/sources/flow/reference/components/prometheus.relabel.md index 65cb02394d4a..22d6c0a42d28 100644 --- a/docs/sources/flow/reference/components/prometheus.relabel.md +++ b/docs/sources/flow/reference/components/prometheus.relabel.md @@ -181,11 +181,9 @@ The two resulting metrics are then propagated to each receiver defined in the - Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}}

diff --git a/docs/sources/flow/reference/components/prometheus.remote_write.md b/docs/sources/flow/reference/components/prometheus.remote_write.md
index f869343e0919..5664cd10aa6e 100644
--- a/docs/sources/flow/reference/components/prometheus.remote_write.md
+++ b/docs/sources/flow/reference/components/prometheus.remote_write.md
@@ -418,11 +418,9 @@ Any labels that start with `__` will be removed before sending to the endpoint.

- Components that consume [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-consumers" >}})

-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}
\ No newline at end of file

diff --git a/docs/sources/flow/reference/components/prometheus.scrape.md b/docs/sources/flow/reference/components/prometheus.scrape.md
index 8adf775687f1..765eb084b32f 100644
--- a/docs/sources/flow/reference/components/prometheus.scrape.md
+++ b/docs/sources/flow/reference/components/prometheus.scrape.md
@@ -298,11 +298,9 @@ Special labels added after a scrape

- Components that export [Prometheus `MetricsReceiver`]({{< relref "../compatibility/#prometheus-metricsreceiver-exporters" >}})

-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}

diff --git a/docs/sources/flow/reference/components/pyroscope.ebpf.md b/docs/sources/flow/reference/components/pyroscope.ebpf.md
index a324e71293ab..590ad574baf9 100644
--- a/docs/sources/flow/reference/components/pyroscope.ebpf.md
+++ b/docs/sources/flow/reference/components/pyroscope.ebpf.md
@@ -18,9 +18,9 @@ title: pyroscope.ebpf

`pyroscope.ebpf` configures an eBPF profiling job for the current host. The collected performance
profiles are forwarded to the list of receivers passed in `forward_to`.

-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
To use the `pyroscope.ebpf` component, you must run {{< param "PRODUCT_NAME" >}} as root and inside the host PID namespace.
-{{% /admonition %}}
+{{< /admonition >}}

You can specify multiple `pyroscope.ebpf` components by giving them different labels; however, this isn't
recommended because it can lead to additional memory and CPU usage.

@@ -95,16 +95,20 @@ can help you pin down a profiling target.

| `__name__` | pyroscope metric name. Defaults to `process_cpu`. |
| `__container_id__` | The container ID derived from the target. |

-### Container ID
+### Targets

-Each collected stack trace is then associated with a specified target from the targets list, determined by a
-container ID. This association process involves checking the `__container_id__`, `__meta_docker_container_id`,
-and `__meta_kubernetes_pod_container_id` labels of a target against the `/proc/{pid}/cgroup` of a process.
+One of the following special labels _must_ be included in each target of `targets` and the label must correspond to the container or process that is profiled:

-If a corresponding container ID is found, the stack traces are aggregated per target based on the container ID.
-If a container ID is not found, the stack trace is associated with a `default_target`.
+* `__container_id__`: The container ID.
+* `__meta_docker_container_id`: The ID of the Docker container.
+* `__meta_kubernetes_pod_container_id`: The ID of the Kubernetes pod container.
+* `__process_pid__`: The process ID.

-Any stack traces not associated with a listed target are ignored.
+Each process is then associated with a specified target from the targets list, determined by a container ID or process PID.
+
+If a process's container ID matches a target's container ID label, the stack traces are aggregated per target based on the container ID.
+If a process's PID matches a target's process PID label, the stack traces are aggregated per target based on the process PID.
+Otherwise, the process isn't profiled.

### Service name

@@ -298,11 +302,9 @@ pyroscope.ebpf "default" {

- Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}})

-{{% admonition type="note" %}}
-
-Connecting some components may not be sensible or components may require further configuration to make the
-connection work correctly. Refer to the linked documentation for more details.
-
-{{% /admonition %}}
+{{< admonition type="note" >}}
+Connecting some components may not be sensible or components may require further configuration to make the connection work correctly.
+Refer to the linked documentation for more details.
+{{< /admonition >}}

diff --git a/docs/sources/flow/reference/components/pyroscope.java.md b/docs/sources/flow/reference/components/pyroscope.java.md
index a7cdb518ac13..92407132e99d 100644
--- a/docs/sources/flow/reference/components/pyroscope.java.md
+++ b/docs/sources/flow/reference/components/pyroscope.java.md
@@ -16,9 +16,9 @@ title: pyroscope.java

`pyroscope.java` continuously profiles Java processes running on the local Linux OS
using [async-profiler](https://github.com/async-profiler/async-profiler).

-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
To use the `pyroscope.java` component, you must run {{< param "PRODUCT_NAME" >}} as root and inside the host PID namespace.
-{{% /admonition %}}
+{{< /admonition >}}

## Usage

@@ -57,11 +57,29 @@ async-profiler binaries for both glibc and musl into the directory with the foll

After profiling of a process starts, the component detects the libc type and copies the corresponding `libAsyncProfiler.so` into the
target process's file system at the exact same path.

-{{% admonition type="note" %}}
+{{< admonition type="note" >}}
The `asprof` binary runs with root permissions.
If you change the `tmp_dir` configuration to something other than `/tmp`, then you must ensure that the directory is only writable by root.
-{{% /admonition %}}
+{{< /admonition >}}
+
+#### `targets` argument
+
+The special `__process_pid__` label _must always_ be present and corresponds to the
+process PID that is used for profiling.
+
+Labels starting with a double underscore (`__`) are treated as _internal_, and are removed prior to scraping.
+
+The special label `service_name` is required and must always be present.
+If it is not specified, `pyroscope.java` will attempt to infer it from
+one of the following sources, in this order:
+1. 
`__meta_kubernetes_pod_annotation_pyroscope_io_service_name` which is a `pyroscope.io/service_name` pod annotation. +2. `__meta_kubernetes_namespace` and `__meta_kubernetes_pod_container_name` +3. `__meta_docker_container_name` +4. `__meta_dockerswarm_container_label_service_name` or `__meta_dockerswarm_service_name` + +If `service_name` is not specified and could not be inferred, then it is set to `unspecified`. + ## Blocks The following blocks are supported inside the definition of @@ -163,11 +181,9 @@ pyroscope.java "java" { - Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/components/pyroscope.scrape.md b/docs/sources/flow/reference/components/pyroscope.scrape.md index 74c1fa30e873..35e7022df482 100644 --- a/docs/sources/flow/reference/components/pyroscope.scrape.md +++ b/docs/sources/flow/reference/components/pyroscope.scrape.md @@ -114,6 +114,7 @@ either of the following sources, in this order: 1. `__meta_kubernetes_pod_annotation_pyroscope_io_service_name` which is a `pyroscope.io/service_name` pod annotation. 2. `__meta_kubernetes_namespace` and `__meta_kubernetes_pod_container_name` 3. `__meta_docker_container_name` +4. `__meta_dockerswarm_container_label_service_name` or `__meta_dockerswarm_service_name` If `service_name` is not specified and could not be inferred, then it is set to `unspecified`. @@ -522,11 +523,10 @@ discovery.http "dynamic_targets" { } pyroscope.scrape "local" { - targets = [ - {"__address__" = "localhost:4100", "service_name"="pyroscope"}, + targets = concat([ + {"__address__" = "localhost:4040", "service_name"="pyroscope"}, {"__address__" = "localhost:12345", "service_name"="agent"}, - discovery.http.dynamic_targets.targets, - ] + ], discovery.http.dynamic_targets.targets) forward_to = [pyroscope.write.local.receiver] } @@ -589,11 +589,9 @@ http://localhost:12345/debug/pprof/mutex - Components that export [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-exporters" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. 
+{{< /admonition >}} diff --git a/docs/sources/flow/reference/components/pyroscope.write.md b/docs/sources/flow/reference/components/pyroscope.write.md index 38b6b542abc0..3012be03319c 100644 --- a/docs/sources/flow/reference/components/pyroscope.write.md +++ b/docs/sources/flow/reference/components/pyroscope.write.md @@ -168,11 +168,9 @@ pyroscope.scrape "default" { - Components that consume [Pyroscope `ProfilesReceiver`]({{< relref "../compatibility/#pyroscope-profilesreceiver-consumers" >}}) -{{% admonition type="note" %}} - -Connecting some components may not be sensible or components may require further configuration to make the -connection work correctly. Refer to the linked documentation for more details. - -{{% /admonition %}} +{{< admonition type="note" >}} +Connecting some components may not be sensible or components may require further configuration to make the connection work correctly. +Refer to the linked documentation for more details. +{{< /admonition >}} \ No newline at end of file diff --git a/docs/sources/flow/reference/config-blocks/http.md b/docs/sources/flow/reference/config-blocks/http.md index 39ffa5b2502c..f90944c3ff59 100644 --- a/docs/sources/flow/reference/config-blocks/http.md +++ b/docs/sources/flow/reference/config-blocks/http.md @@ -50,12 +50,12 @@ tls > windows_certificate_filter > server | [server][] | Con The `tls` block configures TLS settings for the HTTP server. -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} If you add the `tls` block and reload the configuration when {{< param "PRODUCT_NAME" >}} is running, existing connections will continue communicating over plaintext. Similarly, if you remove the `tls` block and reload the configuration when {{< param "PRODUCT_NAME" >}} is running, existing connections will continue communicating over TLS. To ensure all connections use TLS, configure the `tls` block before you start {{< param "PRODUCT_NAME" >}}. -{{% /admonition %}} +{{< /admonition >}} Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- @@ -159,12 +159,12 @@ the following TLS settings are overridden and will cause an error if defined. * `client_ca` * `client_ca_file` -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} This feature is only available on Windows. TLS min and max may not be compatible with the certificate stored in the Windows certificate store. The `windows_certificate_filter` will serve the found certificate even if it is not compatible with the specified TLS version. -{{% /admonition %}} +{{< /admonition >}} ### server block diff --git a/docs/sources/flow/release-notes.md b/docs/sources/flow/release-notes.md index f8053bf3c0b3..baa91ae3d068 100644 --- a/docs/sources/flow/release-notes.md +++ b/docs/sources/flow/release-notes.md @@ -18,7 +18,7 @@ The release notes provide information about deprecations and breaking changes in For a complete list of changes to {{< param "PRODUCT_ROOT_NAME" >}}, with links to pull requests and related issues when available, refer to the [Changelog](https://github.com/grafana/agent/blob/main/CHANGELOG.md). -{{% admonition type="note" %}} +{{< admonition type="note" >}} These release notes are specific to {{< param "PRODUCT_NAME" >}}. 
Other release notes for the different {{< param "PRODUCT_ROOT_NAME" >}} variants are contained on separate pages: @@ -27,7 +27,19 @@ Other release notes for the different {{< param "PRODUCT_ROOT_NAME" >}} variants [release-notes-static]: {{< relref "../static/release-notes.md" >}} [release-notes-operator]: {{< relref "../operator/release-notes.md" >}} -{{% /admonition %}} +{{< /admonition >}} + +## v0.40 + +### Breaking change: Prohibit the configuration of services within modules. + +Previously it was possible to configure the HTTP service via the [HTTP config block](https://grafana.com/docs/agent/v0.39/flow/reference/config-blocks/http/) inside of a module. +This functionality is now only available in the main configuration. + +### Breaking change: Change the default value of `disable_high_cardinality_metrics` to `true`. + +The `disable_high_cardinality_metrics` configuration argument is used by `otelcol.exporter` components such as `otelcol.exporter.otlp`. +If you need to see high cardinality metrics containing labels such as IP addresses and port numbers, you now have to explicitly set `disable_high_cardinality_metrics` to `false`. ## v0.39 diff --git a/docs/sources/flow/tasks/configure/configure-macos.md b/docs/sources/flow/tasks/configure/configure-macos.md index fc1c6677f579..8b860a010dcd 100644 --- a/docs/sources/flow/tasks/configure/configure-macos.md +++ b/docs/sources/flow/tasks/configure/configure-macos.md @@ -31,11 +31,11 @@ To configure {{< param "PRODUCT_NAME" >}} on macOS, perform the following steps: ## Configure the {{% param "PRODUCT_NAME" %}} service -{{% admonition type="note" %}} +{{< admonition type="note" >}} Due to limitations in Homebrew, customizing the service used by {{< param "PRODUCT_NAME" >}} on macOS requires changing the Homebrew formula and reinstalling {{< param "PRODUCT_NAME" >}}. 
-{{% /admonition %}} +{{< /admonition >}} To customize the {{< param "PRODUCT_NAME" >}} service on macOS, perform the following steps: diff --git a/docs/sources/flow/tasks/estimate-resource-usage.md b/docs/sources/flow/tasks/estimate-resource-usage.md index e7b066d9e8ee..f3ed1b7aed05 100644 --- a/docs/sources/flow/tasks/estimate-resource-usage.md +++ b/docs/sources/flow/tasks/estimate-resource-usage.md @@ -4,7 +4,7 @@ aliases: - /docs/grafana-cloud/agent/flow/tasks/estimate-resource-usage/ - /docs/grafana-cloud/monitor-infrastructure/agent/flow/tasks/estimate-resource-usage/ - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tasks/estimate-resource-usage/ - - /docs/grafana-cloud/send-data/agent/flow/tasks/estimate-resource-usage/ + - /docs/grafana-cloud/send-data/agent/flow/tasks/estimate-resource-usage/ # Previous page aliases for backwards compatibility: - /docs/agent/flow/monitoring/resource-usage/ - /docs/grafana-cloud/agent/flow/monitoring/resource-usage/ @@ -13,7 +13,7 @@ aliases: - /docs/grafana-cloud/send-data/agent/flow/monitoring/resource-usage/ - ../monitoring/resource-usage/ # /docs/agent/latest/flow/monitoring/resource-usage/ canonical: https://grafana.com/docs/agent/latest/flow/monitoring/resource-usage/ -description: Estimate expected Agent resource usage +description: Estimate expected Grafana Agent resource usage headless: true title: Estimate resource usage menuTitle: Estimate resource usage @@ -22,24 +22,22 @@ weight: 190 # Estimate {{% param "PRODUCT_NAME" %}} resource usage -This page provides guidance for expected resource usage of -{{% param "PRODUCT_NAME" %}} for each telemetry type, based on operational -experience of some of the {{% param "PRODUCT_NAME" %}} maintainers. +This page provides guidance for expected resource usage of +{{< param "PRODUCT_NAME" >}} for each telemetry type, based on operational +experience of some of the {{< param "PRODUCT_NAME" >}} maintainers. -{{% admonition type="note" %}} - -The resource usage depends on the workload, hardware and the configuration used. +{{< admonition type="note" >}} +The resource usage depends on the workload, hardware, and the configuration used. The information on this page is a good starting point for most users, but your actual usage may be different. - -{{% /admonition %}} +{{< /admonition >}} ## Prometheus metrics The Prometheus metrics resource usage depends mainly on the number of active series that need to be scraped and the scrape interval. -As a rule of thumb, **per each 1 million active series** and with the default +As a rule of thumb, **per each 1 million active series** and with the default scrape interval, you can expect to use approximately: * 0.4 CPU cores @@ -48,8 +46,7 @@ scrape interval, you can expect to use approximately: These recommendations are based on deployments that use [clustering][], but they will broadly apply to other deployment modes. For more information on how to -deploy {{% param "PRODUCT_NAME" %}}, see -[deploying grafana agent][]. +deploy {{< param "PRODUCT_NAME" >}}, see [deploying grafana agent][]. [deploying grafana agent]: {{< relref "../get-started/deploy-agent.md" >}} [clustering]: {{< relref "../concepts/clustering.md" >}} @@ -67,7 +64,7 @@ to use approximately: These recommendations are based on Kubernetes DaemonSet deployments on clusters with relatively small number of nodes and high logs volume on each. 
The resource usage can be higher per each 1 MiB/second of logs if you have a large number of -small nodes due to the constant overhead of running the {{% param "PRODUCT_NAME" %}} on each node. +small nodes due to the constant overhead of running the {{< param "PRODUCT_NAME" >}} on each node. Additionally, factors such as number of labels, number of files and average log line length may all play a role in the resource usage. diff --git a/docs/sources/flow/tasks/migrate/from-prometheus.md b/docs/sources/flow/tasks/migrate/from-prometheus.md index 62fef82d3c2d..84241791ec24 100644 --- a/docs/sources/flow/tasks/migrate/from-prometheus.md +++ b/docs/sources/flow/tasks/migrate/from-prometheus.md @@ -71,10 +71,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the `convert` command can't convert a Prometheus configuration, diagnostic information is sent to `stderr`.\ You can bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion by including the `--bypass-errors` flag. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. Make sure you fully test the converted configuration before using it in a production environment. - {{% /admonition %}} + {{< /admonition >}} {{< code >}} @@ -143,10 +143,10 @@ Your configuration file must be a valid Prometheus configuration file rather tha 1. If your Prometheus configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. You can bypass any non-critical issues and start the Agent by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=prometheus`. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Prometheus configuration. Do not use this flag in a production environment. - {{% /admonition %}} + {{< /admonition >}} ## Example diff --git a/docs/sources/flow/tasks/migrate/from-promtail.md b/docs/sources/flow/tasks/migrate/from-promtail.md index 182dec857c3b..7a0dda9b9248 100644 --- a/docs/sources/flow/tasks/migrate/from-promtail.md +++ b/docs/sources/flow/tasks/migrate/from-promtail.md @@ -71,10 +71,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the convert command can't convert a Promtail configuration, diagnostic information is sent to `stderr`. You can bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion by including the `--bypass-errors` flag. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Promtail configuration. Make sure you fully test the converted configuration before using it in a production environment. - {{% /admonition %}} + {{< /admonition >}} {{< code >}} @@ -139,10 +139,10 @@ Your configuration file must be a valid Promtail configuration file rather than 1. If your Promtail configuration can't be converted and loaded directly into {{< param "PRODUCT_ROOT_NAME" >}}, diagnostic information is sent to `stderr`. 
You can bypass any non-critical issues and start {{< param "PRODUCT_ROOT_NAME" >}} by including the `--config.bypass-conversion-errors` flag in addition to `--config.format=promtail`. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Promtail configuration. Do not use this flag in a production environment. - {{%/admonition %}} + {{< /admonition >}} ## Example @@ -213,7 +213,7 @@ After the configuration is converted, review the {{< param "PRODUCT_NAME" >}} co The following list is specific to the convert command and not {{< param "PRODUCT_NAME" >}}: * Check if you are using any extra command line arguments with Promtail that aren't present in your configuration file. For example, `-max-line-size`. -* Check if you are setting any environment variables, whether [expanded in the config file][] itself or consumed directly by Promtail, such as `JAEGER_AGENT_HOST`. +* Check if you are setting any environment variables, whether [expanded in the configuration file][] itself or consumed directly by Promtail, such as `JAEGER_AGENT_HOST`. * In {{< param "PRODUCT_NAME" >}}, the positions file is saved at a different location. Refer to the [loki.source.file][] documentation for more details. Check if you have any existing setup, for example, a Kubernetes Persistent Volume, that you must update to use the new positions file path. @@ -224,7 +224,7 @@ The following list is specific to the convert command and not {{< param "PRODUCT [Promtail]: https://www.grafana.com/docs/loki//clients/promtail/ [debugging]: #debugging -[expanded in the config file]: https://www.grafana.com/docs/loki//clients/promtail/configuration/#use-environment-variables-in-the-configuration +[expanded in the configuration file]: https://www.grafana.com/docs/loki//clients/promtail/configuration/#use-environment-variables-in-the-configuration {{% docs/reference %}} [local.file_match]: "/docs/agent/ -> /docs/agent//flow/reference/components/local.file_match.md" diff --git a/docs/sources/flow/tasks/migrate/from-static.md b/docs/sources/flow/tasks/migrate/from-static.md index b6be6404aa40..5d1b73626f60 100644 --- a/docs/sources/flow/tasks/migrate/from-static.md +++ b/docs/sources/flow/tasks/migrate/from-static.md @@ -74,10 +74,10 @@ This conversion will enable you to take full advantage of the many additional fe 1. If the convert command can't convert a [Static][] configuration, diagnostic information is sent to `stderr`. You can use the `--bypass-errors` flag to bypass any non-critical issues and output the {{< param "PRODUCT_NAME" >}} configuration using a best-effort conversion. - {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Grafana Agent Static configuration. Make sure you fully test the converted configuration before using it in a production environment. - {{% /admonition %}} + {{< /admonition >}} {{< code >}} @@ -142,10 +142,10 @@ Your configuration file must be a valid [Static] configuration file. 1. If your [Static] configuration can't be converted and loaded directly into {{< param "PRODUCT_NAME" >}}, diagnostic information is sent to `stderr`. You can use the `--config.bypass-conversion-errors` flag with `--config.format=static` to bypass any non-critical issues and start {{< param "PRODUCT_NAME" >}}. 
- {{% admonition type="caution" %}} + {{< admonition type="caution" >}} If you bypass the errors, the behavior of the converted configuration may not match the original Grafana Agent Static configuration. Do not use this flag in a production environment. - {{%/admonition %}} + {{< /admonition >}} ## Example diff --git a/docs/sources/flow/tutorials/flow-by-example/_index.md b/docs/sources/flow/tutorials/flow-by-example/_index.md index a534a2d2efec..d9b037350272 100644 --- a/docs/sources/flow/tutorials/flow-by-example/_index.md +++ b/docs/sources/flow/tutorials/flow-by-example/_index.md @@ -7,7 +7,7 @@ aliases: canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/ description: Learn how to use Grafana Agent Flow title: Flow by example -weight: 300 +weight: 100 --- # Flow by example diff --git a/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md b/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md index 6b13394ad0f0..59bc59c5d17b 100644 --- a/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md +++ b/docs/sources/flow/tutorials/flow-by-example/first-components-and-stdlib/index.md @@ -5,7 +5,7 @@ aliases: - /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/first-components-and-stdlib/ - /docs/grafana-cloud/send-data/agent/flow/tutorials/first-components-and-stdlib/ canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/first-components-and-stdlib/ -description: Learn about the basics of River and the Flow configuration language +description: Learn about the basics of River and the configuration language title: First components and introducing the standard library weight: 20 --- @@ -16,7 +16,7 @@ This tutorial covers the basics of the River language and the standard library. ## River basics -[Configuration language]: https://grafana.com/docs/agent//flow/config-language/ +[Configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/ [Configuration language concepts]: https://grafana.com/docs/agent//flow/concepts/configuration_language/ [Standard library documentation]: https://grafana.com/docs/agent//flow/reference/stdlib/ @@ -50,23 +50,23 @@ This tutorial covers the basics of the River language and the standard library. } ``` - {{% admonition type="note" %}} + {{< admonition type="note" >}} The default log level is `info` and the default log format is `logfmt`. - {{% /admonition %}} + {{< /admonition >}} Try pasting this into `config.river` and running `/path/to/agent run config.river` to see what happens. Congratulations, you've just written your first River file! You've also just written your first {{< param "PRODUCT_NAME" >}} configuration file. This configuration won't do anything, so let's add some components to it. - {{% admonition type="note" %}} + {{< admonition type="note" >}} Comments in River are prefixed with `//` and are single-line only. For example: `// This is a comment`. 
- {{% /admonition %}} + {{< /admonition >}} ## Components [Components]: https://grafana.com/docs/agent//flow/concepts/components/ [Component controller]: https://grafana.com/docs/agent//flow/concepts/component_controller/ -[Components configuration language]: https://grafana.com/docs/agent//flow/config-language/components/ +[Components configuration language]: https://grafana.com/docs/agent//flow/concepts/config-language/components/ [env]: https://grafana.com/docs/agent//flow/reference/stdlib/env/ **Recommended reading** @@ -96,11 +96,11 @@ prometheus.remote_write "local_prom" { } ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} [Component reference]: https://grafana.com/docs/agent//flow/reference/components/ -A list of all available components can be found in the [Component reference][]. Each component has a link to its documentation, which contains a description of what the component does, its arguments, its exports, and Example(s). -{{% /admonition %}} +A list of all available components can be found in the [Component reference][]. Each component has a link to its documentation, which contains a description of what the component does, its arguments, its exports, and examples. +{{< /admonition >}} This pipeline has two components: `local.file` and `prometheus.remote_write`. The `local.file` component is configured with a single argument, `path`, which is set by calling the [env][] standard library function to retrieve the value of the `HOME` environment variable and concatenating it with the string `"file.txt"`. The `local.file` component has a single export, `content`, which contains the contents of the file. @@ -110,13 +110,13 @@ The `prometheus.remote_write` component is configured with an `endpoint` block, Flow of example pipeline with local.file and prometheus.remote_write components
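+
+To make the relationship concrete, here is a hedged sketch of how an export can feed another component's argument: it reuses `local.file.example.content` as a remote write password. The `authed` label, username, and URL are invented for this example, while the `basic_auth` block is part of the standard `prometheus.remote_write` endpoint schema.
+
+```river
+prometheus.remote_write "authed" {
+    endpoint {
+        url = "https://prometheus.example.com/api/v1/write"
+
+        basic_auth {
+            // Illustrative only: use the contents of the watched file
+            // as the password. Because this is an expression, a change
+            // to the file flows through to this component automatically.
+            username = "demo-user"
+            password = local.file.example.content
+        }
+    }
+}
+```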

-{{% admonition type="note" %}} +{{< admonition type="note" >}} The `local.file` component's label is set to `"example"`, so the fully qualified name of the component is `local.file.example`. The `prometheus.remote_write` component's label is set to `"local_prom"`, so the fully qualified name of the component is `prometheus.remote_write.local_prom`. -{{% /admonition %}} +{{< /admonition >}} This example pipeline still doesn't do anything, so let's add some more components to it. -## Shipping our first metrics +## Shipping your first metrics [prometheus.exporter.unix]: https://grafana.com/docs/agent//flow/reference/components/prometheus.exporter.unix/ [prometheus.scrape]: https://grafana.com/docs/agent//flow/reference/components/prometheus.scrape/ @@ -128,10 +128,10 @@ This example pipeline still doesn't do anything, so let's add some more componen - Optional: [prometheus.scrape][] - Optional: [prometheus.remote_write][] -Let's make a simple pipeline with a `prometheus.exporter.unix` component, a `prometheus.scrape` component to scrape it, and a `prometheus.remote_write` component to send the scraped metrics to Prometheus. +Make a simple pipeline with a `prometheus.exporter.unix` component, a `prometheus.scrape` component to scrape it, and a `prometheus.remote_write` component to send the scraped metrics to Prometheus. ```river -prometheus.exporter.unix "localhost" { +prometheus.exporter.unix "localhost" { // This component exposes a lot of metrics by default, so we will keep all of the default arguments. } @@ -152,7 +152,7 @@ prometheus.remote_write "local_prom" { } ``` -Run the agent with: +Run {{< param "PRODUCT_NAME" >}} with: ```bash /path/to/agent run config.river @@ -166,13 +166,13 @@ Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in yo ## Visualizing the relationship between components -Let's look at an example pipeline: +The following diagram is an example pipeline:

Flow of example pipeline with a prometheus.scrape, prometheus.exporter.unix, and prometheus.remote_write components

-The above configuration defines three components: +The preceding configuration defines three components: - `prometheus.scrape` - A component that scrapes metrics from components that export targets. - `prometheus.exporter.unix` - A component that exports metrics from the host, built around [node_exporter](https://github.com/prometheus/node_exporter). @@ -180,7 +180,7 @@ The above configuration defines three components: The `prometheus.scrape` component references the `prometheus.exporter.unix` component's targets export, which is a list of scrape targets. The `prometheus.scrape` component then forwards the scraped metrics to the `prometheus.remote_write` component. -One rule is that components cannot form a cycle. This means that a component cannot reference itself directly or indirectly. This is to prevent infinite loops from forming in the pipeline. +One rule is that components can't form a cycle. This means that a component can't reference itself directly or indirectly. This is to prevent infinite loops from forming in the pipeline. ## Exercise for the reader @@ -190,13 +190,13 @@ One rule is that components cannot form a cycle. This means that a component can - Optional: [prometheus.exporter.redis][] -Let's start a container running Redis and configure the agent to scrape metrics from it. +Let's start a container running Redis and configure {{< param "PRODUCT_NAME" >}} to scrape metrics from it. ```bash docker container run -d --name flow-redis -p 6379:6379 --rm redis ``` -Try modifying the above pipeline to scrape metrics from the Redis exporter. You can refer to the [prometheus.exporter.redis][] component documentation for more information on how to configure it. +Try modifying the pipeline to scrape metrics from the Redis exporter. You can refer to the [prometheus.exporter.redis][] component documentation for more information on how to configure it. To give a visual hint, you want to create a pipeline that looks like this: @@ -204,19 +204,19 @@ To give a visual hint, you want to create a pipeline that looks like this: Flow of exercise pipeline, with a scrape, unix_exporter, redis_exporter, and remote_write component

-{{% admonition type="note" %}} +{{< admonition type="note" >}} [concat]: https://grafana.com/docs/agent//flow/reference/stdlib/concat/ You may find the [concat][] standard library function useful. -{{% /admonition %}} +{{< /admonition >}} -You can run the agent with the new config file by running: +You can run {{< param "PRODUCT_NAME" >}} with the new configuration file by running: ```bash /path/to/agent run config.river ``` -Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After the first scrape, you should be able to query for `redis` metrics as well as `node` metrics! +Navigate to [http://localhost:3000/explore](http://localhost:3000/explore) in your browser. After the first scrape, you should be able to query for `redis` metrics as well as `node` metrics. To shut down the Redis container, run: @@ -228,7 +228,7 @@ If you get stuck, you can always view a solution here: {{< collapse title="Solution" >}} ```river -// Configure your first components, learn about the standard library, and learn how to run the Agent! +// Configure your first components, learn about the standard library, and learn how to run Grafana Agent // prometheus.exporter.redis collects information about Redis and exposes // targets for other components to use @@ -267,8 +267,8 @@ prometheus.remote_write "local_prom" { ## Finishing up and next steps -You might have noticed that running the agent with the above configurations created a directory called `data-agent` in the directory you ran the agent from. This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). If you look in the directory, do you notice anything interesting? The directory for each component is the fully-qualified name! +You might have noticed that running {{< param "PRODUCT_NAME" >}} with the configurations created a directory called `data-agent` in the directory you ran {{< param "PRODUCT_NAME" >}} from. This directory is where components can store data, such as the `prometheus.exporter.unix` component storing its WAL (Write Ahead Log). If you look in the directory, do you notice anything interesting? The directory for each component is the fully qualified name. -If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to the agent's run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. Generally, you will want to use a persistent directory for this, as some components may use the data stored in this directory to perform their function. +If you'd like to store the data elsewhere, you can specify a different directory by supplying the `--storage.path` flag to {{< param "PRODUCT_ROOT_NAME" >}}'s run command, for example, `/path/to/agent run config.river --storage.path /etc/grafana-agent`. Generally, you can use a persistent directory for this, as some components may use the data stored in this directory to perform their function. -In the next tutorial, we will look at how to configure the agent to collect logs from a file and send them to Loki. We will also look at using different components to process metrics and logs before sending them. +In the next tutorial, you will look at how to configure {{< param "PRODUCT_NAME" >}} to collect logs from a file and send them to Loki. You will also look at using different components to process metrics and logs before sending them. 
diff --git a/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md b/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md new file mode 100644 index 000000000000..02c7c3c138f9 --- /dev/null +++ b/docs/sources/flow/tutorials/flow-by-example/logs-and-relabeling-basics/index.md @@ -0,0 +1,308 @@ +--- +aliases: +- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/ +- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/ +- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/logs-and-relabeling-basics/ +- /docs/grafana-cloud/send-data/agent/flow/tutorials/logs-and-relabeling-basics/ +canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/logs-and-relabeling-basics/ +description: Learn how to relabel metrics and collect logs +title: Logs and relabeling basics +weight: 30 +--- + +# Logs and relabeling basics + +This tutorial assumes you have completed the [First components and introducing the standard library](https://grafana.com/docs/agent//flow/tutorials/flow-by-example/first-components-and-stdlib/) tutorial, or are at least familiar with the concepts of components, attributes, and expressions and how to use them. You will cover some basic metric relabeling, followed by how to send logs to Loki. + +## Relabel metrics + +[prometheus.relabel]: https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/ + +**Recommended reading** + +- Optional: [prometheus.relabel][] + +Before moving on to logs, let's look at how we can use the `prometheus.relabel` component to relabel metrics. The `prometheus.relabel` component allows you to perform Prometheus relabeling on metrics and is similar to the `relabel_configs` section of a Prometheus scrape config. + +Let's add a `prometheus.relabel` component to a basic pipeline and see how to add labels. + +```river +prometheus.exporter.unix "localhost" { } + +prometheus.scrape "default" { + scrape_interval = "10s" + + targets = prometheus.exporter.unix.localhost.targets + forward_to = [ + prometheus.relabel.example.receiver, + ] +} + +prometheus.relabel "example" { + forward_to = [ + prometheus.remote_write.local_prom.receiver, + ] + + rule { + action = "replace" + target_label = "os" + replacement = constants.os + } +} + +prometheus.remote_write "local_prom" { + endpoint { + url = "http://localhost:9090/api/v1/write" + } +} +``` + +We have now created the following pipeline: + +![Diagram of pipeline that scrapes prometheus.exporter.unix, relabels the metrics, and remote_writes them](/media/docs/agent/diagram-flow-by-example-relabel-0.svg) + +This pipeline has a `prometheus.relabel` component that has a single rule. +This rule has the `replace` action, which will replace the value of the `os` label with a special value: `constants.os`. +This value is a special constant that is replaced with the OS of the host {{< param "PRODUCT_ROOT_NAME" >}} is running on. +You can see the other available constants in the [constants](https://grafana.com/docs/agent//flow/reference/stdlib/constants/) documentation. +This example has one rule block, but you can have as many as you want. +Each rule block is applied in order. + +If you run {{< param "PRODUCT_ROOT_NAME" >}} and navigate to [localhost:3000/explore](http://localhost:3000/explore), you can see the `os` label on the metrics. Try querying for `node_context_switches_total` and look at the labels. 
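+
+To see multiple rules in action, here is a hedged sketch extending the same component with a second rule that copies the standard `instance` label into a new `node` label (the `node` name is invented for this example). It relies on the rule block's defaults of `regex = "(.*)"` and `replacement = "$1"`, which make a `replace` action copy the source label's value unchanged:
+
+```river
+prometheus.relabel "example" {
+    forward_to = [
+        prometheus.remote_write.local_prom.receiver,
+    ]
+
+    // Rule 1: set the "os" label, as before.
+    rule {
+        action = "replace"
+        target_label = "os"
+        replacement = constants.os
+    }
+
+    // Rule 2: copy the value of "instance" into a new "node" label.
+    rule {
+        action = "replace"
+        source_labels = ["instance"]
+        target_label = "node"
+    }
+}
+```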
+
+Relabeling uses the same rules as Prometheus. You can always refer to the [prometheus.relabel documentation](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel/#rule-block) for a full list of available options.
+
+{{< admonition type="note" >}}
+You can forward multiple components to one `prometheus.relabel` component. This allows you to apply the same relabeling rules to multiple pipelines.
+{{< /admonition >}}
+
+{{< admonition type="warning" >}}
+There is an issue commonly faced when relabeling and using labels that start with `__` (double underscore). These labels are considered internal and are dropped before relabeling rules from a `prometheus.relabel` component are applied. If you would like to keep or act on these kinds of labels, use a [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) component.
+{{< /admonition >}}
+
+## Send logs to Loki
+
+[local.file_match]: https://grafana.com/docs/agent//flow/reference/components/local.file_match/
+[loki.source.file]: https://grafana.com/docs/agent//flow/reference/components/loki.source.file/
+[loki.write]: https://grafana.com/docs/agent//flow/reference/components/loki.write/
+
+**Recommended reading**
+
+- Optional: [local.file_match][]
+- Optional: [loki.source.file][]
+- Optional: [loki.write][]
+
+Now that you're comfortable creating components and chaining them together, let's collect some logs and send them to Loki. We will use the `local.file_match` component to perform file discovery, the `loki.source.file` component to collect the logs, and the `loki.write` component to send the logs to Loki.
+
+Before doing this, we need to ensure we have a log file to scrape. We will use the `echo` command to create a file with some log content.
+
+```bash
+mkdir -p /tmp/flow-logs
+echo "This is a log line" > /tmp/flow-logs/log.log
+```
+
+Now that we have a log file, let's create a pipeline to scrape it.
+
+```river
+local.file_match "tmplogs" {
+    path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}]
+}
+
+loki.source.file "local_files" {
+    targets = local.file_match.tmplogs.targets
+    forward_to = [loki.write.local_loki.receiver]
+}
+
+loki.write "local_loki" {
+    endpoint {
+        url = "http://localhost:3100/loki/api/v1/push"
+    }
+}
+```
+
+The rough flow of this pipeline is:
+
+![Diagram of pipeline that collects logs from /tmp/flow-logs and writes them to a local Loki instance](/media/docs/agent/diagram-flow-by-example-logs-0.svg)
+
+If you navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`, you can query for `{filename="/tmp/flow-logs/log.log"}` and see the log line we created earlier. Try running the following command to add more logs to the file.
+
+```bash
+echo "This is another log line!" >> /tmp/flow-logs/log.log
+```
+
+If you re-execute the query, you can see the new log lines.
+
+![Grafana Explore view of example log lines](/media/docs/agent/screenshot-flow-by-example-log-lines.png)
+
+If you are curious how {{< param "PRODUCT_ROOT_NAME" >}} keeps track of where it is in a log file, you can look at `data-agent/loki.source.file.local_files/positions.yml`.
+If you delete this file, {{< param "PRODUCT_ROOT_NAME" >}} starts reading from the beginning of the file again, which is why keeping {{< param "PRODUCT_ROOT_NAME" >}}'s data directory in a persistent location is desirable.
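+
+One more thing worth knowing before the exercises: `forward_to` is a list, so a single source can fan out to several destinations at once. The following hedged sketch sends each log entry to two Loki instances; the `backup_loki` label and its endpoint URL are invented for illustration:
+
+```river
+loki.source.file "local_files" {
+    targets = local.file_match.tmplogs.targets
+
+    // Every log entry is forwarded to both loki.write components.
+    forward_to = [
+        loki.write.local_loki.receiver,
+        loki.write.backup_loki.receiver,
+    ]
+}
+
+loki.write "backup_loki" {
+    endpoint {
+        url = "http://loki-backup.example.com:3100/loki/api/v1/push"
+    }
+}
+```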
+ +## Exercise + +[loki.relabel]: https://grafana.com/docs/agent//flow/reference/components/loki.relabel/ +[loki.process]: https://grafana.com/docs/agent//flow/reference/components/loki.process/ + +**Recommended reading** + +- [loki.relabel][] +- [loki.process][] + +### Add a Label to Logs + +This exercise will have two parts, building on the previous example. Let's start by adding an `os` label (just like the Prometheus example) to all of the logs we collect. + +Modify the following snippet to add the label `os` with the value of the `os` constant. + +```river +local.file_match "tmplogs" { + path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}] +} + +loki.source.file "local_files" { + targets = local.file_match.tmplogs.targets + forward_to = [loki.write.local_loki.receiver] +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +{{< admonition type="note" >}} +You can use the [loki.relabel](https://grafana.com/docs/agent//flow/reference/components/loki.relabel) component to relabel and add labels, just like you can with the [prometheus.relabel](https://grafana.com/docs/agent//flow/reference/components/prometheus.relabel) component. +{{< /admonition >}} + +Once you have your completed configuration, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following: + +```bash +echo 'level=info msg="INFO: This is an info level log!"' >> /tmp/flow-logs/log.log +echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.log +echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log +``` + +Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{filename="/tmp/flow-logs/log.log"}` and see if you can find the new label! + +Now that we have added new labels, we can also filter on them. Try querying for `{os!=""}`. You should only see the lines you added in the previous step. + +{{< collapse title="Solution" >}} + +```river +// Let's learn about relabeling and send logs to Loki! + +local.file_match "tmplogs" { + path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}] +} + +loki.source.file "local_files" { + targets = local.file_match.tmplogs.targets + forward_to = [loki.relabel.add_static_label.receiver] +} + +loki.relabel "add_static_label" { + forward_to = [loki.write.local_loki.receiver] + + rule { + target_label = "os" + replacement = constants.os + } +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +{{< /collapse >}} + +### Extract and add a Label from Logs + +{{< admonition type="note" >}} +This exercise is more challenging than the previous one. If you are having trouble, skip it and move to the next section, which will cover some of the concepts used here. You can always come back to this exercise later. +{{< /admonition >}} + +This exercise will build on the previous one, though it's more involved. + +Let's say we want to extract the `level` from the logs and add it as a label. As a starting point, look at [loki.process][]. +This component allows you to perform processing on logs, including extracting values from log contents. + +Try modifying your configuration from the previous section to extract the `level` from the logs and add it as a label. +If needed, you can find a solution to the previous exercise at the end of the [previous section](#add-a-label-to-logs). 
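+
+The note below names the two `loki.process` blocks you need. If you want to see their general shape first without giving the exercise away, here is a hedged sketch that acts on an unrelated, hypothetical `app` key instead of `level`:
+
+```river
+loki.process "example_shape" {
+    forward_to = [loki.write.local_loki.receiver]
+
+    // Parse each logfmt line and copy the value of its "app" field
+    // into the extracted map under the same name.
+    stage.logfmt {
+        mapping = {
+            "app" = "",
+        }
+    }
+
+    // Promote the extracted "app" value to a label on the log entry.
+    stage.labels {
+        values = {
+            "app" = "",
+        }
+    }
+}
+```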
+
+{{< admonition type="note" >}}
+The `stage.logfmt` and `stage.labels` blocks for `loki.process` may be helpful.
+{{< /admonition >}}
+
+Once you have your completed configuration, run {{< param "PRODUCT_ROOT_NAME" >}} and execute the following:
+
+```bash
+echo 'level=info msg="INFO: This is an info level log!"' >> /tmp/flow-logs/log.log
+echo 'level=warn msg="WARN: This is a warn level log!"' >> /tmp/flow-logs/log.log
+echo 'level=debug msg="DEBUG: This is a debug level log!"' >> /tmp/flow-logs/log.log
+```
+
+Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`. Try querying for `{level!=""}` to see the new labels in action.
+
+![Grafana Explore view of example log lines, now with the extracted 'level' label](/media/docs/agent/screenshot-flow-by-example-log-line-levels.png)
+
+{{< collapse title="Solution" >}}
+
+```river
+// Let's learn about relabeling and send logs to Loki!
+
+local.file_match "tmplogs" {
+    path_targets = [{"__path__" = "/tmp/flow-logs/*.log"}]
+}
+
+loki.source.file "local_files" {
+    targets = local.file_match.tmplogs.targets
+    forward_to = [loki.process.add_new_label.receiver]
+}
+
+loki.process "add_new_label" {
+    // Extract the value of "level" from the log line and add it to
+    // the extracted map as "extracted_level". You could also use
+    // "level" = "", which would extract the value of "level" and add
+    // it to the extracted map as "level", but to make it explicit for
+    // this example, we will use a different name.
+    //
+    // The extracted map will be covered in more detail in the next section.
+    stage.logfmt {
+        mapping = {
+            "extracted_level" = "level",
+        }
+    }
+
+    // Add the value of "extracted_level" from the extracted map as a "level" label
+    stage.labels {
+        values = {
+            "level" = "extracted_level",
+        }
+    }
+
+    forward_to = [loki.relabel.add_static_label.receiver]
+}
+
+loki.relabel "add_static_label" {
+    forward_to = [loki.write.local_loki.receiver]
+
+    rule {
+        target_label = "os"
+        replacement = constants.os
+    }
+}
+
+loki.write "local_loki" {
+    endpoint {
+        url = "http://localhost:3100/loki/api/v1/push"
+    }
+}
+```
+
+{{< /collapse >}}
+
+## Finishing up and next steps
+
+You have learned the concepts of components, attributes, and expressions. You have also seen how to use some standard library components to collect metrics and logs. In the next tutorial, you will learn more about how to use the `loki.process` component to extract values from logs and use them.
+
diff --git a/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md b/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md
new file mode 100644
index 000000000000..327b40716c30
--- /dev/null
+++ b/docs/sources/flow/tutorials/flow-by-example/processing-logs/index.md
@@ -0,0 +1,407 @@
+---
+aliases:
+- /docs/grafana-cloud/agent/flow/tutorials/flow-by-example/processing-logs/
+- /docs/grafana-cloud/monitor-infrastructure/agent/flow/tutorials/flow-by-example/processing-logs/
+- /docs/grafana-cloud/monitor-infrastructure/integrations/agent/flow/tutorials/flow-by-example/processing-logs/
+- /docs/grafana-cloud/send-data/agent/flow/tutorials/processing-logs/
+canonical: https://grafana.com/docs/agent/latest/flow/tutorials/flow-by-example/processing-logs/
+description: Learn how to process logs
+title: Processing Logs
+weight: 40
+---
+
+# Processing Logs
+
+This tutorial assumes you are familiar with setting up and connecting components.
+It covers using `loki.source.api` to receive logs over HTTP, processing and filtering them, and sending them to Loki.
+
+## Receive logs over HTTP and Process
+
+**Recommended reading**
+
+- Optional: [loki.source.api](https://grafana.com/docs/agent//flow/reference/components/loki.source.api/)
+
+The `loki.source.api` component can receive logs over HTTP.
+It can be useful for receiving logs from other {{< param "PRODUCT_ROOT_NAME" >}}s or collectors, or directly from applications that can send logs over HTTP, and then processing them centrally.
+
+Your pipeline is going to look like this:
+
+![Loki Source API Pipeline](/media/docs/agent/diagram-flow-by-example-logs-pipeline.svg)
+
+Let's start by setting up the `loki.source.api` component:
+
+```river
+loki.source.api "listener" {
+    http {
+        listen_address = "127.0.0.1"
+        listen_port = 9999
+    }
+
+    labels = { "source" = "api" }
+
+    forward_to = [loki.process.process_logs.receiver]
+}
+```
+
+This is a simple configuration.
+You are configuring the `loki.source.api` component to listen on `127.0.0.1:9999` and attach a `source="api"` label to the received log entries, which are then forwarded to the `loki.process.process_logs` component's exported receiver.
+Next, you can configure the `loki.process` and `loki.write` components.
+
+## Process and Write Logs
+
+**Recommended reading**
+
+- [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block)
+- [loki.process#stage.json](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagejson-block)
+- [loki.process#stage.labels](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagelabels-block)
+
+```river
+// Let's send and process more logs!
+
+loki.source.api "listener" {
+    http {
+        listen_address = "127.0.0.1"
+        listen_port = 9999
+    }
+
+    labels = { "source" = "api" }
+
+    forward_to = [loki.process.process_logs.receiver]
+}
+
+loki.process "process_logs" {
+
+    // Stage 1
+    stage.json {
+        expressions = {
+            log = "",
+            ts = "timestamp",
+        }
+    }
+
+    // Stage 2
+    stage.timestamp {
+        source = "ts"
+        format = "RFC3339"
+    }
+
+    // Stage 3
+    stage.json {
+        source = "log"
+
+        expressions = {
+            is_secret = "",
+            level = "",
+            log_line = "message",
+        }
+    }
+
+    // Stage 4
+    stage.drop {
+        source = "is_secret"
+        value = "true"
+    }
+
+    // Stage 5
+    stage.labels {
+        values = {
+            level = "",
+        }
+    }
+
+    // Stage 6
+    stage.output {
+        source = "log_line"
+    }
+
+    // This stage adds static values to the labels on the log line
+    stage.static_labels {
+        values = {
+            source = "demo-api",
+        }
+    }
+
+    forward_to = [loki.write.local_loki.receiver]
+}
+
+loki.write "local_loki" {
+    endpoint {
+        url = "http://localhost:3100/loki/api/v1/push"
+    }
+}
+```
+
+You can skip to the next section if you successfully completed the previous section's exercises.
+If not, or if you were unsure how things worked, let's break down what is happening in the `loki.process` component.
+
+Many of the `stage.*` blocks in `loki.process` read from or write to a shared map of values extracted from the logs.
+You can think of this extracted map as a hashmap or table that each stage has access to, and it is referred to as the "extracted map" from here on.
+In subsequent stages, you can use the extracted map to filter logs, add or remove labels, or even modify the log line.
+
+{{< admonition type="note" >}}
+`stage.*` blocks are executed in the order they appear in the component, top down.
+{{< /admonition >}}
+
+Let's use an example log line to illustrate this, then go stage by stage, showing the contents of the extracted map. Here is our example log line:
+
+```json
+{
+    "log": {
+        "is_secret": "true",
+        "level": "info",
+        "message": "This is a secret message!"
+    },
+    "timestamp": "2023-11-16T06:01:50Z"
+}
+```
+
+### Stage 1
+
+```river
+stage.json {
+    expressions = {
+        log = "",
+        ts = "timestamp",
+    }
+}
+```
+
+This stage parses the log line as JSON, extracts two values from it, `log` and `timestamp`, and puts them into the extracted map with keys `log` and `ts`, respectively.
+
+{{< admonition type="note" >}}
+Supplying an empty string is shorthand for using the same key as in the input log line (so `log = ""` is the same as `log = "log"`). The _keys_ of the `expressions` object end up as the keys in the extracted map, and the _values_ are used as keys to look up in the parsed log line.
+{{< /admonition >}}
+
+If this were Python, it would be roughly equivalent to:
+
+```python
+extracted_map = {}
+log_line = {"log": {"is_secret": "true", "level": "info", "message": "This is a secret message!"}, "timestamp": "2023-11-16T06:01:50Z"}
+
+extracted_map["log"] = log_line["log"]
+extracted_map["ts"] = log_line["timestamp"]
+```
+
+Extracted map _before_ performing this stage:
+
+```json
+{}
+```
+
+Extracted map _after_ performing this stage:
+
+```json
+{
+    "log": {
+        "is_secret": "true",
+        "level": "info",
+        "message": "This is a secret message!"
+    },
+    "ts": "2023-11-16T06:01:50Z"
+}
+```
+
+### Stage 2
+
+```river
+stage.timestamp {
+    source = "ts"
+    format = "RFC3339"
+}
+```
+
+This stage acts on the `ts` value in the map you extracted in the previous stage.
+The value of `ts` is parsed in the `RFC3339` format and added as the timestamp to be ingested by Loki.
+This is useful if you want to use the timestamp present in the log itself, rather than the time the log is ingested.
+This stage does not modify the extracted map.
+
+### Stage 3
+
+```river
+stage.json {
+    source = "log"
+
+    expressions = {
+        is_secret = "",
+        level = "",
+        log_line = "message",
+    }
+}
+```
+
+This stage acts on the `log` value in the extracted map, which you extracted in Stage 1.
+This value is also a JSON object, so you can extract values from it as well.
+This stage extracts three values (`is_secret`, `level`, and `message`) from the `log` object and puts them into the extracted map under the keys `is_secret`, `level`, and `log_line`.
+
+If this were Python, it would be roughly equivalent to:
+
+```python
+extracted_map = {
+    "log": {
+        "is_secret": "true",
+        "level": "info",
+        "message": "This is a secret message!",
+    },
+    "ts": "2023-11-16T06:01:50Z",
+}
+
+source = extracted_map["log"]
+
+extracted_map["is_secret"] = source["is_secret"]
+extracted_map["level"] = source["level"]
+extracted_map["log_line"] = source["message"]
+```
+
+Extracted map _before_ performing this stage:
+
+```json
+{
+    "log": {
+        "is_secret": "true",
+        "level": "info",
+        "message": "This is a secret message!"
+    },
+    "ts": "2023-11-16T06:01:50Z"
+}
+```
+
+Extracted map _after_ performing this stage:
+
+```json
+{
+    "log": {
+        "is_secret": "true",
+        "level": "info",
+        "message": "This is a secret message!"
+    },
+    "ts": "2023-11-16T06:01:50Z",
+    "is_secret": "true",
+    "level": "info",
+    "log_line": "This is a secret message!"
+}
+```
+
+### Stage 4
+
+```river
+stage.drop {
+    source = "is_secret"
+    value = "true"
+}
+```
+
+This stage acts on the `is_secret` value in the extracted map, which is a value that you extracted in the previous stage.
+This stage drops the log line if the value of `is_secret` is `"true"` and does not modify the extracted map.
+There are many other ways to filter logs, but this is a simple example.
+Refer to the [loki.process#stage.drop](https://grafana.com/docs/agent//flow/reference/components/loki.process/#stagedrop-block) documentation for more information.
+
+### Stage 5
+
+```river
+stage.labels {
+    values = {
+        level = "",
+    }
+}
+```
+
+This stage adds a label to the log using the same shorthand as above (so this is equivalent to using `values = { level = "level" }`).
+This stage adds a `level` label to the log, with its value taken from `level` in the extracted map (`"info"` from our example log line).
+This stage does not modify the extracted map.
+
+### Stage 6
+
+```river
+stage.output {
+    source = "log_line"
+}
+```
+
+This stage uses the `log_line` value in the extracted map to set the actual log line that is forwarded to Loki.
+Rather than sending the entire JSON blob to Loki, you are only sending `original_log_line["log"]["message"]`, along with some labels that you attached.
+
+This stage does not modify the extracted map.
+
+## Putting it all together
+
+Now that you have all of the pieces, let's run {{< param "PRODUCT_ROOT_NAME" >}} and send some logs to it.
+Modify `config.river` with the configuration from the previous example and start {{< param "PRODUCT_ROOT_NAME" >}} with:
+
+```bash
+/path/to/agent run config.river
+```
+
+To get the current time in `RFC3339` format, you can run:
+
+```bash
+date -u +"%Y-%m-%dT%H:%M:%SZ"
+```
+
+Try executing the following, replacing the `"timestamp"` value:
+
+```bash
+curl localhost:9999/loki/api/v1/raw -XPOST -H "Content-Type: application/json" -d '{"log": {"is_secret": "false", "level": "debug", "message": "This is a debug message!"}, "timestamp": "<TIMESTAMP>"}'
+```
+
+Now that you have sent some logs, let's see how they look in Grafana.
+Navigate to [localhost:3000/explore](http://localhost:3000/explore) and switch the Datasource to `Loki`.
+Try querying for `{source="demo-api"}` and see if you can find the logs you sent.
+
+Try playing around with the values of `"level"`, `"message"`, `"timestamp"`, and `"is_secret"` and see how the logs change.
+You can also try adding more stages to the `loki.process` component to extract more values from the logs, or add more labels.
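+
+For example, a hedged sketch of one more stage you could append inside `loki.process "process_logs"`, after Stage 4 (whether you want this extra filtering is up to you):
+
+```river
+// Drop entries whose extracted "level" value is "debug", so that only
+// info and higher reach Loki. "level" is already present in the
+// extracted map thanks to Stage 3.
+stage.drop {
+    source = "level"
+    value = "debug"
+}
+```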
+ +![Example Loki Logs](/media/docs/agent/screenshot-flow-by-example-processed-log-lines.png) + +## Exercise + +Since you are already using Docker and Docker exports logs, let's get those logs into Loki. +You can refer to the [discovery.docker](https://grafana.com/docs/agent//flow/reference/components/discovery.docker/) and [loki.source.docker](https://grafana.com/docs/agent//flow/reference/components/loki.source.docker/) documentation for more information. + +To ensure proper timestamps and other labels, make sure you use a `loki.process` component to process the logs before sending them to Loki. + +Although you have not used it before, let's use a `discovery.relabel` component to attach the container name as a label to the logs. +You can refer to the [discovery.relabel](https://grafana.com/docs/agent//flow/reference/components/discovery.relabel/) documentation for more information. +The `discovery.relabel` component is very similar to the `prometheus.relabel` component, but is used to relabel discovered targets rather than metrics. + +{{< collapse title="Solution" >}} + +```river +// Discover docker containers to collect logs from +discovery.docker "docker_containers" { + // Note that if you are using Docker Desktop Engine this may need to be changed to + // something like "unix:///${HOME}/.docker/desktop/docker.sock" + host = "unix:///var/run/docker.sock" +} + +// Extract container name from __meta_docker_container_name label and add as label +discovery.relabel "docker_containers" { + targets = discovery.docker.docker_containers.targets + + rule { + source_labels = ["__meta_docker_container_name"] + target_label = "container" + } +} + +// Scrape logs from docker containers and send to be processed +loki.source.docker "docker_logs" { + host = "unix:///var/run/docker.sock" + targets = discovery.relabel.docker_containers.output + forward_to = [loki.process.process_logs.receiver] +} + +// Process logs and send to Loki +loki.process "process_logs" { + stage.docker { } + + forward_to = [loki.write.local_loki.receiver] +} + +loki.write "local_loki" { + endpoint { + url = "http://localhost:3100/loki/api/v1/push" + } +} +``` + +{{< /collapse >}} \ No newline at end of file diff --git a/docs/sources/operator/_index.md b/docs/sources/operator/_index.md index 1c9aef3aca53..a39241c87a62 100644 --- a/docs/sources/operator/_index.md +++ b/docs/sources/operator/_index.md @@ -25,17 +25,17 @@ telemetry collection: * Prometheus Operator [Probe][] resources for collecting metrics from Kubernetes [Ingresses][]. * Custom [PodLogs][] resources for collecting logs. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Grafana Agent Operator does not collect traces. -{{% /admonition %}} +{{< /admonition >}} Grafana Agent Operator is currently in [Beta][], and is subject to change or being removed with functionality which covers the same use case. -{{% admonition type="note" %}} +{{< admonition type="note" >}} If you are shipping your data to Grafana Cloud, use [Kubernetes Monitoring](/docs/grafana-cloud/kubernetes-monitoring/) to set up Agent Operator. Kubernetes Monitoring provides a simplified approach and preconfigured dashboards and alerts. -{{% /admonition %}} +{{< /admonition >}} Grafana Agent Operator uses additional custom resources to manage the deployment and configuration of Grafana Agents running in static mode. 
In addition to the diff --git a/docs/sources/operator/deploy-agent-operator-resources.md b/docs/sources/operator/deploy-agent-operator-resources.md index 2823f58cb996..6b6f6564c85a 100644 --- a/docs/sources/operator/deploy-agent-operator-resources.md +++ b/docs/sources/operator/deploy-agent-operator-resources.md @@ -34,9 +34,9 @@ The hierarchy of custom resources is as follows: To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}). -{{% admonition type="note" %}} +{{< admonition type="note" >}} Agent Operator is currently in [beta]({{< relref "../stability.md#beta" >}}) and its custom resources are subject to change. -{{% /admonition %}} +{{< /admonition >}} ## Before you begin @@ -46,9 +46,9 @@ Before you begin, make sure that you have deployed the Grafana Agent Operator CR In this section, you'll roll out a `GrafanaAgent` resource. See [Grafana Agent Operator architecture]({{< relref "./architecture" >}}) for a discussion of the resources in the `GrafanaAgent` resource hierarchy. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Due to the variety of possible deployment architectures, the official Agent Operator Helm chart does not provide built-in templates for the custom resources described in this guide. You must configure and deploy these manually as described in this section. We recommend templating and adding the following manifests to your own in-house Helm charts and GitOps flows. -{{% /admonition %}} +{{< /admonition >}} To deploy the `GrafanaAgent` resource: @@ -145,7 +145,7 @@ To deploy the `GrafanaAgent` resource: - Specifies an Agent image version. - Specifies `MetricsInstance` and `LogsInstance` selectors. These search for `MetricsInstances` and `LogsInstances` in the same namespace with labels matching `agent: grafana-agent-metrics` and `agent: grafana-agent-logs`, respectively. - - Sets a `cluster: cloud` label for all metrics shipped to your Prometheus-compatible endpoint. Change this label to your cluster name. To search for `MetricsInstances` or `LogsInstances` in a *different* namespace, use the `instanceNamespaceSelector` field. To learn more about this field, see the `GrafanaAgent` [CRD specification](https://github.com/grafana/agent/blob/main/production/operator/crds/monitoring.grafana.com_grafanaagents.yaml). + - Sets a `cluster: cloud` label for all metrics shipped to your Prometheus-compatible endpoint. Change this label to your cluster name. To search for `MetricsInstances` or `LogsInstances` in a *different* namespace, use the `instanceNamespaceSelector` field. To learn more about this field, see the `GrafanaAgent` [CRD specification](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_grafanaagents.yaml). 1. Customize the manifests as needed and roll them out to your cluster using `kubectl apply -f` followed by the filename. @@ -381,9 +381,9 @@ To deploy the `LogsInstance` resource into your cluster: 1. Copy the following `PodLogs` manifest to a file, then roll it to your cluster using `kubectl apply -f` followed by the filename. The manifest defines your logging targets. Agent Operator turns this into Agent configuration for the logs subsystem, and rolls it out to the DaemonSet of logging Agents. - {{% admonition type="note" %}} + {{< admonition type="note" >}} The following is a minimal working example which you should adapt to your production needs. 
- {{% /admonition %}} + {{< /admonition >}} ```yaml apiVersion: monitoring.grafana.com/v1alpha1 @@ -403,7 +403,7 @@ To deploy the `LogsInstance` resource into your cluster: matchLabels: {} ``` - This example tails container logs for all Pods in the `default` namespace. You can restrict the set of matched Pods by using the `matchLabels` selector. You can also set additional `pipelineStages` and create `relabelings` to add or modify log line labels. To learn more about the `PodLogs` specification and available resource fields, see the [PodLogs CRD](https://github.com/grafana/agent/blob/main/production/operator/crds/monitoring.grafana.com_podlogs.yaml). + This example tails container logs for all Pods in the `default` namespace. You can restrict the set of matched Pods by using the `matchLabels` selector. You can also set additional `pipelineStages` and create `relabelings` to add or modify log line labels. To learn more about the `PodLogs` specification and available resource fields, see the [PodLogs CRD](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds/monitoring.grafana.com_podlogs.yaml). The above `PodLogs` resource adds the following labels to log lines: diff --git a/docs/sources/operator/getting-started.md b/docs/sources/operator/getting-started.md index c59acf233391..e7393880876b 100644 --- a/docs/sources/operator/getting-started.md +++ b/docs/sources/operator/getting-started.md @@ -35,7 +35,7 @@ will fail if it can't find the Custom Resource Definitions of objects it is looking to use. To learn more about the custom resources Agent Operator provides and their hierarchy, see [Grafana Agent Operator architecture]({{< relref "./architecture" >}}). You can find the set of Custom Resource Definitions for Grafana Agent Operator in the Grafana Agent repository under -[production/operator/crds](https://github.com/grafana/agent/tree/main/production/operator/crds). +[`operations/agent-static-operator/crds`](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds). To deploy the CRDs: diff --git a/docs/sources/operator/helm-getting-started.md b/docs/sources/operator/helm-getting-started.md index 78245505d859..bb63f01190ce 100644 --- a/docs/sources/operator/helm-getting-started.md +++ b/docs/sources/operator/helm-getting-started.md @@ -27,7 +27,7 @@ To deploy Agent Operator with Helm, make sure that you have the following: ## Install the Agent Operator Helm Chart -In this section, you'll install the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator) into your Kubernetes cluster. This will install the latest version of Agent Operator and its [Custom Resource Definitions](https://github.com/grafana/agent/tree/main/production/operator/crds) (CRDs). The chart configures Operator to maintain a Service that lets you scrape kubelets using a `ServiceMonitor`. +In this section, you'll install the [grafana-agent-operator Helm chart](https://github.com/grafana/helm-charts/tree/main/charts/agent-operator) into your Kubernetes cluster. This will install the latest version of Agent Operator and its [Custom Resource Definitions](https://github.com/grafana/agent/tree/main/operations/agent-static-operator/crds) (CRDs). The chart configures Operator to maintain a Service that lets you scrape kubelets using a `ServiceMonitor`. 
To install the Agent Operator Helm chart: diff --git a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md b/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md index f70facdf3541..2997d8c140e8 100644 --- a/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md +++ b/docs/sources/shared/flow/reference/components/otelcol-debug-metrics-block.md @@ -16,7 +16,7 @@ The following arguments are supported: Name | Type | Description | Default | Required -----------------------------------|-----------|------------------------------------------------------|---------|--------- -`disable_high_cardinality_metrics` | `boolean` | Whether to disable certain high cardinality metrics. | `false` | no +`disable_high_cardinality_metrics` | `boolean` | Whether to disable certain high cardinality metrics. | `true` | no `disable_high_cardinality_metrics` is the Grafana Agent equivalent to the `telemetry.disableHighCardinalityMetrics` feature gate in the OpenTelemetry Collector. It removes attributes that could cause high cardinality metrics. diff --git a/docs/sources/shared/flow/reference/components/rule-block-logs.md b/docs/sources/shared/flow/reference/components/rule-block-logs.md index 180bb7e18167..3db6449ed1b1 100644 --- a/docs/sources/shared/flow/reference/components/rule-block-logs.md +++ b/docs/sources/shared/flow/reference/components/rule-block-logs.md @@ -40,6 +40,6 @@ You can use the following actions: * `replace` - Matches `regex` to the concatenated labels. If there's a match, it replaces the content of the `target_label` using the contents of the `replacement` field. * `uppercase` - Sets `target_label` to the uppercase form of the concatenated `source_labels`. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The regular expression capture groups can be referred to using either the `$CAPTURE_GROUP_NUMBER` or `${CAPTURE_GROUP_NUMBER}` notation. -{{% /admonition %}} +{{< /admonition >}} diff --git a/docs/sources/shared/flow/reference/components/rule-block.md b/docs/sources/shared/flow/reference/components/rule-block.md index 0b732954b18a..614b062b0ec6 100644 --- a/docs/sources/shared/flow/reference/components/rule-block.md +++ b/docs/sources/shared/flow/reference/components/rule-block.md @@ -40,6 +40,6 @@ You can use the following actions: * `replace` - Matches `regex` to the concatenated labels. If there's a match, it replaces the content of the `target_label` using the contents of the `replacement` field. * `uppercase` - Sets `target_label` to the uppercase form of the concatenated `source_labels`. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The regular expression capture groups can be referred to using either the `$CAPTURE_GROUP_NUMBER` or `${CAPTURE_GROUP_NUMBER}` notation. -{{% /admonition %}} +{{< /admonition >}} diff --git a/docs/sources/shared/wal-data-retention.md b/docs/sources/shared/wal-data-retention.md index 973af3afb4d6..e7fa38871801 100644 --- a/docs/sources/shared/wal-data-retention.md +++ b/docs/sources/shared/wal-data-retention.md @@ -86,9 +86,9 @@ is unsuccessful, and you must manually delete the corrupted WAL to continue. If the WAL becomes corrupted, Grafana Agent writes error messages such as `err="failed to find segment for index"` to the log file. -{{% admonition type="note" %}} +{{< admonition type="note" >}} Deleting a WAL segment or a WAL file permanently deletes the stored WAL data. 
-{{% /admonition %}} +{{< /admonition >}} To delete the corrupted WAL: @@ -100,12 +100,12 @@ To delete the corrupted WAL: may be different than the default depending on the [wal_directory][] setting in your Static configuration file or the path specified by the Flow [command line flag][run] `--storage-path`. - {{% admonition type="note" %}} + {{< admonition type="note" >}} There is one `wal` directory per: * Metrics instance running in Static mode * `prometheus.remote_write` component running in Flow mode - {{% /admonition %}} + {{< /admonition >}} 1. [Start][Stop] Grafana Agent and verify that the WAL is working correctly. diff --git a/docs/sources/static/api/_index.md b/docs/sources/static/api/_index.md index 95995f5b6abb..ff3237d15a99 100644 --- a/docs/sources/static/api/_index.md +++ b/docs/sources/static/api/_index.md @@ -25,10 +25,10 @@ API endpoints are stable unless otherwise noted. Grafana Agent exposes a config management REST API for managing instance configurations when it is running in [scraping service mode][scrape]. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The scraping service mode is a requirement for the config management API, however this is not a prerequisite for the Agent API or Ready/Healthy API. -{{% /admonition %}} +{{< /admonition >}} The following endpoints are exposed: @@ -127,13 +127,13 @@ defined in the Configuration Reference. The name field of the configuration is ignored and the name in the URL takes precedence. The request body must be formatted as YAML. -{{% admonition type="warning" %}} +{{< admonition type="warning" >}} By default, all instance configuration files that read credentials from a file on disk will be rejected. This prevents malicious users from reading the contents of arbitrary files as passwords and sending their contents to fake remote_write endpoints. To change the behavior, set `dangerous_allow_reading_files` to true in the `scraping_service` block. -{{% /admonition %}} +{{< /admonition >}} Status code: 201 with a new config, 200 on updated config. Response on success: @@ -174,9 +174,9 @@ Response on success: GET /agent/api/v1/metrics/instances ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} The deprecated alias is `/agent/api/v1/instances` -{{% /admonition %}} +{{< /admonition >}} Status code: 200 on success. Response on success: @@ -196,9 +196,9 @@ Response on success: GET /agent/api/v1/metrics/targets ``` -{{% admonition type="note" %}} +{{< admonition type="note" >}} The deprecated alias is `/agent/api/v1/targets` -{{% /admonition %}} +{{< /admonition >}} This endpoint collects all metrics subsystem targets known to the Agent across all running instances. Only targets being scraped from the local Agent will be returned. If diff --git a/docs/sources/static/configuration/_index.md b/docs/sources/static/configuration/_index.md index 92dc9a452bbd..fa1a195bd638 100644 --- a/docs/sources/static/configuration/_index.md +++ b/docs/sources/static/configuration/_index.md @@ -137,9 +137,9 @@ The following flags will configure basic auth for requests made to HTTP/S remote - `-config.url.basic-auth-user `: the basic auth username - `-config.url.basic-auth-password-file `: path to a file containing the basic auth password -{{% admonition type="note" %}} +{{< admonition type="note" >}} This beta feature is subject to change in future releases. 
-{{% /admonition %}} +{{< /admonition >}} {{% docs/reference %}} [flags]: "/docs/agent/ -> /docs/agent//static/configuration/flags" diff --git a/docs/sources/static/configuration/integrations/snmp-config.md b/docs/sources/static/configuration/integrations/snmp-config.md index 893348006f92..c9c4f910f908 100644 --- a/docs/sources/static/configuration/integrations/snmp-config.md +++ b/docs/sources/static/configuration/integrations/snmp-config.md @@ -14,9 +14,9 @@ The `snmp` block configures the `snmp` integration, which is an embedded version of [`snmp_exporter`](https://github.com/prometheus/snmp_exporter). This allows collection of SNMP metrics from the network devices with ease. -{{% admonition type="note" %}} +{{< admonition type="note" >}} `snmp config` uses the latest configuration introduced in version 0.23 of the Prometheus `snmp_exporter`. -{{% /admonition %}} +{{< /admonition >}} ## Quick configuration example diff --git a/docs/sources/static/configuration/integrations/windows-exporter-config.md b/docs/sources/static/configuration/integrations/windows-exporter-config.md index 7f12117ebfbc..bcb753b0860c 100644 --- a/docs/sources/static/configuration/integrations/windows-exporter-config.md +++ b/docs/sources/static/configuration/integrations/windows-exporter-config.md @@ -62,7 +62,7 @@ Full reference of options: # List of collectors to enable. Any non-experimental collector from the # embedded version of windows_exporter can be enabled here. - [enabled_collectors: | default = "cpu,cs,logical_disk,net,os,service,system,textfile"] + [enabled_collectors: | default = "cpu,cs,logical_disk,net,os,service,system"] # Settings for collectors which accept configuration. Settings specified here # are only used if the corresponding collector is enabled in diff --git a/docs/sources/static/configuration/traces-config.md b/docs/sources/static/configuration/traces-config.md index 8ede4e9eb94e..4ff3bfc85e2a 100644 --- a/docs/sources/static/configuration/traces-config.md +++ b/docs/sources/static/configuration/traces-config.md @@ -17,11 +17,11 @@ configures its own tracing pipeline. Having multiple configs allows you to configure multiple distinct pipelines, each of which collects spans and sends them to a different location. -{{% admonition type="note" %}} +{{< admonition type="note" >}} If you are using multiple configs, you must manually set port numbers for each receiver, otherwise they will all try to use the same port and fail to start. -{{% /admonition %}} +{{< /admonition >}} ```yaml configs: diff --git a/docs/sources/static/release-notes.md b/docs/sources/static/release-notes.md index ad01a1fb4404..90afd41dfc42 100644 --- a/docs/sources/static/release-notes.md +++ b/docs/sources/static/release-notes.md @@ -46,9 +46,9 @@ that supports OTLP. ### Breaking change: The default value of `retry_on_http_429` is overriden to `true` for the `queue_config` in `remote_write` in `metrics` config. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The default set by Grafana Agent Static Mode is different than the default set by Prometheus. -{{% /admonition %}} +{{< /admonition >}} The Prometheus default value for `retry_on_http_429` is set to `true` for the `queue_config` in `remote_write`. This changed default setting allows the agent to retry sending data when it receives an HTTP 429 error and helps avoid losing data in metric pipelines. 
diff --git a/docs/sources/static/set-up/install/_index.md b/docs/sources/static/set-up/install/_index.md index 24663d2bdade..3e62fdbdf80d 100644 --- a/docs/sources/static/set-up/install/_index.md +++ b/docs/sources/static/set-up/install/_index.md @@ -22,24 +22,20 @@ The following architectures are supported: - macOS: AMD64 (Intel), ARM64 (Apple Silicon) - FreeBSD: AMD64 -{{% admonition type="note" %}} +{{< admonition type="note" >}} ppc64le builds are considered secondary release targets and do not have the same level of support and testing as other platforms. -{{% /admonition %}} +{{< /admonition >}} {{< section >}} -{{% admonition type="note" %}} +{{< admonition type="note" >}} Installing Grafana Agent on other operating systems is possible, but is not recommended or supported. -{{% /admonition %}} +{{< /admonition >}} ## Grafana Cloud Use the Grafana Agent [Kubernetes configuration](/docs/grafana-cloud/monitor-infrastructure/kubernetes-monitoring/configuration/) or follow instructions for installing the Grafana Agent in the [Walkthrough](/docs/grafana-cloud/monitor-infrastructure/integrations/get-started/). -## Tanka - -For more information, refer to the [Tanka](https://tanka.dev) configurations in the Grafana Agent [production](https://github.com/grafana/agent/tree/main/production/tanka/grafana-agent) directory on GitHub. - ## Data collection By default, Grafana Agent sends anonymous usage information to Grafana Labs. Refer to [data collection][] for more information diff --git a/docs/sources/static/set-up/install/install-agent-binary.md b/docs/sources/static/set-up/install/install-agent-binary.md index 82e14c59119a..8d53d8376821 100644 --- a/docs/sources/static/set-up/install/install-agent-binary.md +++ b/docs/sources/static/set-up/install/install-agent-binary.md @@ -19,9 +19,9 @@ Grafana Agent is distributed as a standalone binary for the following operating * macOS: AMD64, (Intel), ARM64 (Apple Silicon) * Windows: AMD64 -{{% admonition type="note" %}} +{{< admonition type="note" >}} ppc64le builds are considered secondary release targets and do not have the same level of support and testing as other platforms. -{{% /admonition %}} +{{< /admonition >}} The binary executable will run Grafana Agent in standalone mode. If you want to run Grafana Agent as a service, refer to the installation instructions for: diff --git a/docs/sources/static/set-up/install/install-agent-docker.md b/docs/sources/static/set-up/install/install-agent-docker.md index ad563251c2ae..bece55596635 100644 --- a/docs/sources/static/set-up/install/install-agent-docker.md +++ b/docs/sources/static/set-up/install/install-agent-docker.md @@ -41,9 +41,9 @@ docker run \ Replace `CONFIG_FILE_PATH` with the configuration file path on your Linux host system. -{{% admonition type="note" %}} +{{< admonition type="note" >}} For the flags to work correctly, you must expose the paths on your Linux host to the Docker container through a bind mount. -{{%/admonition %}} +{{< /admonition >}} ## Run a Windows Docker container @@ -61,9 +61,9 @@ Replace the following: * `CONFIG_FILE_PATH`: The configuration file path on your Windows host system. * `WAL_DATA_DIRECTORY`: the directory used to store your metrics before sending them to Prometheus. Old WAL data is cleaned up every hour and is used for recovery if the process crashes. -{{% admonition type="note" %}} +{{< admonition type="note" >}} For the flags to work correctly, you must expose the paths on your Windows host to the Docker container through a bind mount. 
-{{%/admonition %}} +{{< /admonition >}} ## Next steps diff --git a/docs/sources/static/set-up/install/install-agent-kubernetes.md b/docs/sources/static/set-up/install/install-agent-kubernetes.md index 95fdd5597b53..d55a7d9af2a5 100644 --- a/docs/sources/static/set-up/install/install-agent-kubernetes.md +++ b/docs/sources/static/set-up/install/install-agent-kubernetes.md @@ -23,10 +23,10 @@ You can use the Helm chart for Grafana Agent to deploy Grafana Agent in static m ## Deploy -{{% admonition type="note" %}} +{{< admonition type="note" >}} These instructions show you how to install the generic [Helm chart](https://github.com/grafana/agent/tree/main/operations/helm/charts/grafana-agent) for Grafana Agent. You can deploy Grafana Agent in static mode or flow mode. The Helm chart deploys flow mode by default. -{{% /admonition %}} +{{< /admonition >}} To deploy Grafana Agent in static mode on Kubernetes using Helm, run the following commands in a terminal window: @@ -52,10 +52,10 @@ To deploy Grafana Agent in static mode on Kubernetes using Helm, run the followi - _`<RELEASE_NAME>`_: The name to use for your Grafana Agent installation, such as `grafana-agent`. - {{% admonition type="warning" %}} + {{< admonition type="warning" >}} Always pass `--set agent.mode=static` in `helm install` or `helm upgrade` commands to ensure Grafana Agent gets installed in static mode. Alternatively, set `agent.mode` to `static` in your values.yaml file. - {{% /admonition %}} + {{< /admonition >}} For more information on the Grafana Agent Helm chart, refer to the Helm chart documentation on [Artifact Hub][]. diff --git a/docs/sources/static/set-up/install/install-agent-macos.md b/docs/sources/static/set-up/install/install-agent-macos.md index 48f383ee6b7d..c23bd59ec52b 100644 --- a/docs/sources/static/set-up/install/install-agent-macos.md +++ b/docs/sources/static/set-up/install/install-agent-macos.md @@ -19,9 +19,9 @@ You can install Grafana Agent in static mode on macOS with Homebrew. Install [Homebrew][] on your computer. -{{% admonition type="note" %}} +{{< admonition type="note" >}} The default prefix for Homebrew on Intel is `/usr/local`. The default prefix for Homebrew on Apple Silicon is `/opt/homebrew`. To verify the default prefix for Homebrew on your computer, open a terminal window and type `brew --prefix`. -{{% /admonition %}} +{{< /admonition >}} [Homebrew]: https://brew.sh @@ -76,9 +76,9 @@ brew uninstall grafana-agent 1. Edit `$(brew --prefix)/etc/grafana-agent/config.yml` and add the configuration blocks for your specific telemetry needs. Refer to [Configure Grafana Agent][configure] for more information. -{{% admonition type="note" %}} +{{< admonition type="note" >}} To send your data to Grafana Cloud, set up Grafana Agent using the Grafana Cloud integration. Refer to [how to install an integration](/docs/grafana-cloud/data-configuration/integrations/install-and-manage-integrations/) and [macOS integration](/docs/grafana-cloud/data-configuration/integrations/integration-reference/integration-macos-node/).
-{{%/admonition %}} +{{< /admonition >}} ## Next steps diff --git a/go.mod b/go.mod index c3976ceb0790..c9800748482b 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,7 @@ require ( github.com/google/dnsmasq_exporter v0.2.1-0.20230620100026-44b14480804a github.com/google/go-cmp v0.6.0 github.com/google/go-jsonnet v0.18.0 - github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 + github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 github.com/google/renameio/v2 v2.0.0 github.com/google/uuid v1.4.0 github.com/gorilla/mux v1.8.0 @@ -56,11 +56,11 @@ require ( github.com/grafana/go-gelf/v2 v2.0.1 // Loki main commit where the Prometheus dependency matches ours. TODO(@tpaschalis) Update to kXYZ branch once it's available github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a - github.com/grafana/pyroscope-go/godeltaprof v0.1.6 + github.com/grafana/pyroscope-go/godeltaprof v0.1.7 github.com/grafana/pyroscope/api v0.4.0 github.com/grafana/pyroscope/ebpf v0.4.1 github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db - github.com/grafana/river v0.3.0 + github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1 github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 github.com/grafana/tail v0.0.0-20230510142333-77b18831edf0 github.com/grafana/vmware_exporter v0.0.4-beta @@ -94,7 +94,7 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/ncabatoff/process-exporter v0.7.10 github.com/nerdswords/yet-another-cloudwatch-exporter v0.55.0 - github.com/ohler55/ojg v1.20.0 // indirect + github.com/ohler55/ojg v1.20.1 // indirect github.com/oklog/run v1.1.0 github.com/olekukonko/tablewriter v0.0.5 github.com/oliver006/redis_exporter v1.54.0 @@ -109,12 +109,14 @@ require ( github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/loki v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanprocessor v0.87.0 @@ -606,7 +608,7 @@ require github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab require ( connectrpc.com/connect v1.14.0 github.com/githubexporter/github-exporter v0.0.0-20231025122338-656e7dc33fe7 - github.com/grafana/jfr-parser/pprof v0.0.0-20240108100259-6b558fedfb6f + github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361 github.com/natefinch/atomic v1.0.1 
github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.87.0 github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.87.0 @@ -620,7 +622,9 @@ require ( dario.cat/mergo v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 // indirect github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 // indirect github.com/Shopify/sarama v1.38.1 // indirect + github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/Workiva/go-datastructures v1.1.0 // indirect github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.26.0 // indirect github.com/channelmeter/iso8601duration v0.0.0-20150204201828-8da3af7a2a61 // indirect @@ -630,7 +634,7 @@ require ( github.com/go-jose/go-jose/v3 v3.0.1 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/grafana/jfr-parser v0.7.2-0.20240108100259-6b558fedfb6f // indirect + github.com/grafana/jfr-parser v0.8.0 // indirect github.com/hetznercloud/hcloud-go/v2 v2.4.0 // indirect github.com/julienschmidt/httprouter v1.3.0 // indirect github.com/knadh/koanf/v2 v2.0.1 // indirect @@ -639,8 +643,11 @@ require ( github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 // indirect github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 // indirect github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.87.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0 // indirect github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.87.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect @@ -685,9 +692,10 @@ replace ( // // * There is a release of Prometheus which contains // prometheus/prometheus#13002 +// and prometheus/prometheus#13497 // We use the last v1-related tag as the replace statement does not work for v2 // tags without the v2 suffix to the module root. 
-replace github.com/prometheus/prometheus => github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2 // grafana/prometheus@drop-old-inmemory-samples-squashed-2 +replace github.com/prometheus/prometheus => github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406 // cmp_header_order branch replace gopkg.in/yaml.v2 => github.com/rfratto/go-yaml v0.0.0-20211119180816-77389c3526dc diff --git a/go.sum b/go.sum index 9db8621684ca..6710608f0f42 100644 --- a/go.sum +++ b/go.sum @@ -183,6 +183,8 @@ github.com/DataDog/datadog-go v0.0.0-20160329135253-cc2f4770f4d6/go.mod h1:LButx github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962 h1:KeNholpO2xKjgaaSyd+DyQRrsQjhbSeS7qe4nEw8aQw= github.com/GehirnInc/crypt v0.0.0-20200316065508-bb7000b8a962/go.mod h1:kC29dT1vFpj7py2OvG1khBdQpo3kInWP+6QipLbdngo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0 h1:tk85AYGwOf6VNtoOQi8w/kVDi2vmPxp3/OU2FsUpdcA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.20.0/go.mod h1:Xx0VKh7GJ4si3rmElbh19Mejxz68ibWg/J30ZOMrqzU= github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/IBM/sarama v1.42.1 h1:wugyWa15TDEHh2kvq2gAy1IHLjEjuYOYgXz/ruC/OSQ= @@ -237,6 +239,8 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0= +github.com/Showmax/go-fqdn v1.0.0 h1:0rG5IbmVliNT5O19Mfuvna9LL7zlHyRfsSvBPZmF9tM= +github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZvOrJAko= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -990,8 +994,8 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240117000934-35fc243c5815 h1:WzfWbQz/Ze8v6l++GGbGNFZnUShVpP/0xffCPLL+ax8= +github.com/google/pprof v0.0.0-20240117000934-35fc243c5815/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= @@ -1053,10 +1057,10 @@ github.com/grafana/go-gelf/v2 
v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85/go.mod h1:crI9WX6p0IhrqB+DqIUHulRW853PaNFf7o4UprV//3I= github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9 h1:WB3bGH2f1UN6jkd6uAEWfHB8OD7dKJ0v2Oo6SNfhpfQ= github.com/grafana/gomemcache v0.0.0-20230316202710-a081dae0aba9/go.mod h1:PGk3RjYHpxMM8HFPhKKo+vve3DdlPUELZLSDEFehPuU= -github.com/grafana/jfr-parser v0.7.2-0.20240108100259-6b558fedfb6f h1:ioj3QOLQAc9MmXgDzzCNeA9KP6DbtH+CjUZKx1/99tE= -github.com/grafana/jfr-parser v0.7.2-0.20240108100259-6b558fedfb6f/go.mod h1:M5u1ux34Qo47ZBWksbMYVk40s7dvU3WMVYpxweEu4R0= -github.com/grafana/jfr-parser/pprof v0.0.0-20240108100259-6b558fedfb6f h1:IOUaXkpk5kxF00nwa34fTHMsX6lxq0So+fjiOA5ascM= -github.com/grafana/jfr-parser/pprof v0.0.0-20240108100259-6b558fedfb6f/go.mod h1:SDOa+U4/vAkuGjYBWXJpHpiaesbeXuhaQW4xzHzTSL4= +github.com/grafana/jfr-parser v0.8.0 h1:/uo2wZNXrxw7tKLFwP2omJ3EQGMkD9wzhPsRogVofc0= +github.com/grafana/jfr-parser v0.8.0/go.mod h1:M5u1ux34Qo47ZBWksbMYVk40s7dvU3WMVYpxweEu4R0= +github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361 h1:TtNajaiSRfM2Mz8N7ouFQDFlviXbIEk9Hts0yoZnhGM= +github.com/grafana/jfr-parser/pprof v0.0.0-20240126072739-986e71dc0361/go.mod h1:P5406BrWxjahTzVF6aCSumNI1KPlZJc0zO0v+zKZ4gc= github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a h1:lvSHlNONeo/H+aWRk86QEfBpRDCEX1yoqpsCK0Tys+g= github.com/grafana/loki v1.6.2-0.20231004111112-07cbef92268a/go.mod h1:a5c5ZTC6FNufKkvF8NeDAb2nCWJpgkVDrejmV+O9hac= github.com/grafana/loki/pkg/push v0.0.0-20231212100434-384e5c2dc872 h1:6kPX7bngjBgUlHqADwZ6249UtzMaoQW5n0H8bOtnYeM= @@ -1071,18 +1075,18 @@ github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e0 github.com/grafana/opentelemetry-collector/service v0.0.0-20231018134914-c0109e052230/go.mod h1:kBdpzrqR2wJkOdg50yzp4dv+2XBMyeqTgF4lCx0hSpQ= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520 h1:HnFWqxhoSF3WC7sKAdMZ+SRXvHLVZlZ3sbQjuUlTqkw= github.com/grafana/postgres_exporter v0.8.1-0.20210722175051-db35d7c2f520/go.mod h1:+HPXgiOV0InDHcZ2jNijL1SOKvo0eEPege5fQA0+ICI= -github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2 h1:eJD8U9G91ID/pKsLjJnjqve8yv1NiE/l6dGYnwchPVM= -github.com/grafana/prometheus v1.8.2-0.20240105105355-3e2c486167d2/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= -github.com/grafana/pyroscope-go/godeltaprof v0.1.6 h1:nEdZ8louGAplSvIJi1HVp7kWvFvdiiYg3COLlTwJiFo= -github.com/grafana/pyroscope-go/godeltaprof v0.1.6/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= +github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406 h1:LVIOYe5j92m10wluP5hgeHqSkOLnZzcPxhYCkdbLXCE= +github.com/grafana/prometheus v1.8.2-0.20240130142130-51b39f24d406/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g= +github.com/grafana/pyroscope-go/godeltaprof v0.1.7 h1:C11j63y7gymiW8VugJ9ZW0pWfxTZugdSJyC48olk5KY= +github.com/grafana/pyroscope-go/godeltaprof v0.1.7/go.mod h1:Tk376Nbldo4Cha9RgiU7ik8WKFkNpfds98aUzS8omLE= github.com/grafana/pyroscope/api v0.4.0 h1:J86DxoNeLOvtJhB1Cn65JMZkXe682D+RqeoIUiYc/eo= github.com/grafana/pyroscope/api v0.4.0/go.mod h1:MFnZNeUM4RDsDOnbgKW3GWoLSBpLzMMT9nkvhHHo81o= github.com/grafana/pyroscope/ebpf v0.4.1 h1:iqQoOsfKen5KpTRe6MfGeBZfgK1s7ROH+Cs/vZs1B3A= github.com/grafana/pyroscope/ebpf v0.4.1/go.mod h1:W99Mq+yJGP5nZUQWNv+jVytiWWgWXwHjIRmi9k3xHzA= github.com/grafana/regexp v0.0.0-20221123153739-15dc172cd2db h1:7aN5cccjIqCLTzedH7MZzRZt5/lsAHch6Z3L2ZGn5FA= github.com/grafana/regexp 
v0.0.0-20221123153739-15dc172cd2db/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/grafana/river v0.3.0 h1:6TsaR/vkkcppUM9I0muGbPIUedCtpPu6OWreE5+CE6g= -github.com/grafana/river v0.3.0/go.mod h1:icSidCSHYXJUYy6TjGAi/D+X7FsP7Gc7cxvBUIwYMmY= +github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1 h1:mCOKdWkLv8n9X0ORWrPR+W/zLOAa1o6iM+Dfy0ofQUs= +github.com/grafana/river v0.3.1-0.20240123144725-960753160cd1/go.mod h1:tAiNX2zt3HUsNyPNUDSvE6AgQ4+kqJvljBI+ACppMtM= github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3 h1:UPkAxuhlAcRmJT3/qd34OMTl+ZU7BLLfOO2+NXBlJpY= github.com/grafana/smimesign v0.2.1-0.20220408144937-2a5adf3481d3/go.mod h1:iZiiwNT4HbtGRVqCQu7uJPEZCuEE5sfSSttcnePkDl4= github.com/grafana/snowflake-prometheus-exporter v0.0.0-20221213150626-862cad8e9538 h1:tkT0yha3JzB5S5VNjfY4lT0cJAe20pU8XGt3Nuq73rM= @@ -1426,6 +1430,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/k0kubun/pp/v3 v3.2.0 h1:h33hNTZ9nVFNP3u2Fsgz8JXiF5JINoZfFq4SvKJwNcs= +github.com/k0kubun/pp/v3 v3.2.0/go.mod h1:ODtJQbQcIRfAD3N+theGCV1m/CBxweERz2dapdz1EwA= github.com/kardianos/service v1.0.0/go.mod h1:8CzDhVuCuugtsHyZoTvsOBuvonN/UDBvl0kH+BUxvbo= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= @@ -1684,8 +1690,8 @@ github.com/npillmayer/nestext v0.1.3/go.mod h1:h2lrijH8jpicr25dFY+oAJLyzlya6jhnu github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/ohler55/ojg v1.20.0 h1:hmpsD9VyuoVH7bHCPtni9eCpOxiIhSlIEzNndXkCySY= -github.com/ohler55/ojg v1.20.0/go.mod h1:uHcD1ErbErC27Zhb5Df2jUjbseLLcmOCo6oxSr3jZxo= +github.com/ohler55/ojg v1.20.1 h1:Io65sHjMjYPI7yuhUr8VdNmIQdYU6asKeFhOs8xgBnY= +github.com/ohler55/ojg v1.20.1/go.mod h1:uHcD1ErbErC27Zhb5Df2jUjbseLLcmOCo6oxSr3jZxo= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v0.0.0-20180308005104-6934b124db28/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= @@ -1739,6 +1745,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2client github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension v0.87.0/go.mod h1:DRpgdIDMa+CFE96SoEPwigGBuZbwSNWotTgkJlrZMVc= github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0 h1:Z4o71/rS7mmpJ/9uzta3/nTaT+vKt0CU35o4inDLA9Y= github.com/open-telemetry/opentelemetry-collector-contrib/extension/sigv4authextension v0.87.0/go.mod h1:clScLUe8m0CTZMcV0scqq+fFFvw5Q1dASkYlYsrRptM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0 h1:JJsQ6iMFIDb7W6uLh6LQ5k4XOgWolr7ugVBoeV4l7hQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.87.0/go.mod h1:rDdtaUrMV6TJHqssyiYSfsLfFN1pIg4JOTDaE9AUapQ= 
github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0 h1:W4Ty2pSyge/qNAOILO6HqyKrAcgALs0bn5CmpGZJXVo= github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.87.0/go.mod h1:3EFmVoLcdM8Adj75N8TGJ4txDB29oW1chTLCFiL/wxs= github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.87.0 h1:ekT4/I9J484j4yR/0VHj5AGtgv8KmNd+e4oXxNJNR/o= @@ -1751,6 +1759,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.87 github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.87.0/go.mod h1:ntSfqIeoGj0O+pXXyqDG9iTAw/PQg2JsO26EJ1GAKto= github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0 h1:kDamu7uZHRmeJWqaJg42LSgprRGokmQ4t8ACslzS0GU= github.com/open-telemetry/opentelemetry-collector-contrib/internal/kafka v0.87.0/go.mod h1:EAw9aBkrDIDWQvRBdJiDkaJmCqcgZpiZzYZEvOjg4uI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0 h1:8pVElJ4AMIiJxS+sxnK9CX73RED7iv/FYbqkvvX01ig= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.87.0/go.mod h1:zRQU4eN6rNXeVKD8g2p2Czb88o/Hd2BkVdar5nCk0+k= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.87.0 h1:sx1ye7Y2rJ2qi11i2ih9T7BocxaV0uaBBf7B8ijCYpU= github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.87.0/go.mod h1:AobBiNPFNHUm0MJFTieajasG/xNMjMYI7BGGTSKh0xg= github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchpersignal v0.87.0 h1:sy75u6ZwBvRwv9RjEF65SqlkBsAeZFqF4+eFOLhIsJQ= @@ -1783,6 +1793,8 @@ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattribute github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.87.0/go.mod h1:g6H0fB9TW03Lb8M+H0BXtgQp7gPncIwf3Fk73xOs9EA= github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0 h1:QJKdtNcsxBhG2ZwSzYRVI0oxUqBJJvhfWf0OnjHU3jY= github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.87.0/go.mod h1:skMmFcl+gxyiOQXvwHc0IKpC73iyQ7zl9r1aRNmPMwI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0 h1:gEv7UNu4K5ptvKIpWQmVS+0XMrIzqZWczcjyhLnsx9M= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.87.0/go.mod h1:6Rnjwj4bZU7Ab+nLD1YqQlbdsnsKoOR/OzyI42+PyE8= github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0 h1:BIGb6dfmaTlDE7KbiQUhnD9SvL5HanbJbWJrnzURfPY= github.com/open-telemetry/opentelemetry-collector-contrib/processor/servicegraphprocessor v0.87.0/go.mod h1:EnaQxXfCCWkSEfsQbGOvYbeJ/EuqvtMYTLTq8RN6TiY= github.com/open-telemetry/opentelemetry-collector-contrib/processor/spanmetricsprocessor v0.87.0 h1:4l/QetnprIMethZYfD2RK+MfMR83f6QycYb9bhJFItc= diff --git a/integration-tests/common/metrics_assert.go b/integration-tests/common/metrics_assert.go new file mode 100644 index 000000000000..5704f9e49ff0 --- /dev/null +++ b/integration-tests/common/metrics_assert.go @@ -0,0 +1,107 @@ +package common + +import ( + "fmt" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +const promURL = "http://localhost:9009/prometheus/api/v1/query?query=" + +// Default metrics list according to what the prom-gen app is generating. 
+var PromDefaultMetrics = []string{ + "golang_counter", + "golang_gauge", + "golang_histogram_bucket", + "golang_summary", +} + +// Default histogram metrics list according to what the prom-gen app is generating. +var PromDefaultHistogramMetric = []string{ + "golang_native_histogram", +} + +// Default metrics list according to what the otel-metrics-gen app is generating. +var OtelDefaultMetrics = []string{ + "example_counter", + "example_float_counter", + "example_updowncounter", + "example_float_updowncounter", + "example_histogram_bucket", + "example_float_histogram_bucket", +} + +// Default histogram metrics list according to what the otel-metrics-gen app is generating. +var OtelDefaultHistogramMetrics = []string{ + "example_exponential_histogram", + "example_exponential_float_histogram", +} + +// MetricQuery returns a formatted Prometheus metric query with a given metricName and a given test_name label. +func MetricQuery(metricName string, testName string) string { + return fmt.Sprintf("%s%s{test_name='%s'}", promURL, metricName, testName) +} + +// MimirMetricsTest checks that all given metrics are stored in Mimir. +func MimirMetricsTest(t *testing.T, metrics []string, histogramMetrics []string, testName string) { + for _, metric := range metrics { + metric := metric + t.Run(metric, func(t *testing.T) { + t.Parallel() + AssertMetricData(t, MetricQuery(metric, testName), metric, testName) + }) + } + for _, metric := range histogramMetrics { + metric := metric + t.Run(metric, func(t *testing.T) { + t.Parallel() + AssertHistogramData(t, MetricQuery(metric, testName), metric, testName) + }) + } +} + +// AssertHistogramData performs a Prometheus query and expects the result to eventually contain the expected histogram. +// The count and sum metrics should be greater than 10 before the timeout triggers. +func AssertHistogramData(t *testing.T, query string, expectedMetric string, testName string) { + var metricResponse MetricResponse + assert.EventuallyWithT(t, func(c *assert.CollectT) { + err := FetchDataFromURL(query, &metricResponse) + assert.NoError(c, err) + if assert.NotEmpty(c, metricResponse.Data.Result) { + assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) + assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName) + if assert.NotNil(c, metricResponse.Data.Result[0].Histogram) { + histogram := metricResponse.Data.Result[0].Histogram + if assert.NotEmpty(c, histogram.Data.Count) { + count, _ := strconv.Atoi(histogram.Data.Count) + assert.Greater(c, count, 10, "Count should be at some point greater than 10.") + } + if assert.NotEmpty(c, histogram.Data.Sum) { + sum, _ := strconv.ParseFloat(histogram.Data.Sum, 64) + assert.Greater(c, sum, 10., "Sum should be at some point greater than 10.") + } + assert.NotEmpty(c, histogram.Data.Buckets) + assert.Nil(c, metricResponse.Data.Result[0].Value) + } + } + }, DefaultTimeout, DefaultRetryInterval, "Histogram data did not satisfy the conditions within the time limit") +} + +// AssertMetricData performs a Prometheus query and expects the result to eventually contain the expected metric.
+func AssertMetricData(t *testing.T, query, expectedMetric string, testName string) { + var metricResponse MetricResponse + assert.EventuallyWithT(t, func(c *assert.CollectT) { + err := FetchDataFromURL(query, &metricResponse) + assert.NoError(c, err) + if assert.NotEmpty(c, metricResponse.Data.Result) { + assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) + assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName) + assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value) + assert.Nil(c, metricResponse.Data.Result[0].Histogram) + } + }, DefaultTimeout, DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") +} diff --git a/integration-tests/tests/otlp-metrics/otlp_metrics_test.go b/integration-tests/tests/otlp-metrics/otlp_metrics_test.go index 6c3676a20604..f244f57213ba 100644 --- a/integration-tests/tests/otlp-metrics/otlp_metrics_test.go +++ b/integration-tests/tests/otlp-metrics/otlp_metrics_test.go @@ -3,91 +3,11 @@ package main import ( - "fmt" - "strconv" "testing" "github.com/grafana/agent/integration-tests/common" - "github.com/stretchr/testify/assert" ) -const promURL = "http://localhost:9009/prometheus/api/v1/query?query=" - -func metricQuery(metricName string, testName string) string { - return fmt.Sprintf("%s%s{test_name='%s'}", promURL, metricName, testName) -} - func TestOTLPMetrics(t *testing.T) { - const testName = "otlp_metrics" - tests := []struct { - metric string - }{ - // TODO: better differentiate these metric types? - {"example_counter"}, - {"example_float_counter"}, - {"example_updowncounter"}, - {"example_float_updowncounter"}, - {"example_histogram_bucket"}, - {"example_float_histogram_bucket"}, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.metric, func(t *testing.T) { - t.Parallel() - assertMetricData(t, metricQuery(tt.metric, testName), tt.metric, testName) - }) - } - - histogramTests := []string{ - "example_exponential_histogram", - "example_exponential_float_histogram", - } - - for _, metric := range histogramTests { - metric := metric - t.Run(metric, func(t *testing.T) { - t.Parallel() - assertHistogramData(t, metricQuery(metric, testName), metric, testName) - }) - } -} - -func assertHistogramData(t *testing.T, query string, expectedMetric string, testName string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName) - if assert.NotNil(c, metricResponse.Data.Result[0].Histogram) { - histogram := metricResponse.Data.Result[0].Histogram - if assert.NotEmpty(c, histogram.Data.Count) { - count, _ := strconv.Atoi(histogram.Data.Count) - assert.Greater(c, count, 10, "Count should be at some point greater than 10.") - } - if assert.NotEmpty(c, histogram.Data.Sum) { - sum, _ := strconv.Atoi(histogram.Data.Sum) - assert.Greater(c, sum, 10, "Sum should be at some point greater than 10.") - } - assert.NotEmpty(c, histogram.Data.Buckets) - assert.Nil(c, metricResponse.Data.Result[0].Value) - } - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Histogram data did not satisfy the conditions within the time limit") -} - -func assertMetricData(t *testing.T,
query, expectedMetric string, testName string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, testName) - assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value) - assert.Nil(c, metricResponse.Data.Result[0].Histogram) - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") + common.MimirMetricsTest(t, common.OtelDefaultMetrics, common.OtelDefaultHistogramMetrics, "otlp_metrics") } diff --git a/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go b/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go index 9a0c5d780975..930de77ba403 100644 --- a/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go +++ b/integration-tests/tests/otlp-metrics/otlp_to_prom_metrics_test.go @@ -4,39 +4,20 @@ package main import ( "testing" + + "github.com/grafana/agent/integration-tests/common" ) func TestOTLPToPromMetrics(t *testing.T) { - const testName = "otlp_to_prom_metrics" - tests := []struct { - metric string - }{ - {"example_counter_total"}, - {"example_float_counter_total"}, - {"example_updowncounter"}, - {"example_float_updowncounter"}, - {"example_histogram_bucket"}, - {"example_float_histogram_bucket"}, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.metric, func(t *testing.T) { - t.Parallel() - assertMetricData(t, metricQuery(tt.metric, testName), tt.metric, testName) - }) + // Not using the default here because some metric names change during the conversion. + metrics := []string{ + "example_counter_total", // Change from example_counter to example_counter_total. + "example_float_counter_total", // Change from example_float_counter to example_float_counter_total. + "example_updowncounter", + "example_float_updowncounter", + "example_histogram_bucket", + "example_float_histogram_bucket", } - histogramTests := []string{ - "example_exponential_histogram", - "example_exponential_float_histogram", - } - - for _, metric := range histogramTests { - metric := metric - t.Run(metric, func(t *testing.T) { - t.Parallel() - assertHistogramData(t, metricQuery(metric, testName), metric, testName) - }) - } + common.MimirMetricsTest(t, metrics, common.OtelDefaultHistogramMetrics, "otlp_to_prom_metrics") } diff --git a/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go b/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go index daaaa519131e..8765c8930ea8 100644 --- a/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go +++ b/integration-tests/tests/scrape-prom-metrics/scrape_prom_metrics_test.go @@ -3,78 +3,11 @@ package main import ( - "fmt" - "strconv" "testing" "github.com/grafana/agent/integration-tests/common" - "github.com/stretchr/testify/assert" ) -const promURL = "http://localhost:9009/prometheus/api/v1/query?query=" - -func metricQuery(metricName string) string { - return fmt.Sprintf("%s%s{test_name='scrape_prom_metrics'}", promURL, metricName) -} - func TestScrapePromMetrics(t *testing.T) { - metrics := []string{ - // TODO: better differentiate these metric types? 
- "golang_counter", - "golang_gauge", - "golang_histogram_bucket", - "golang_summary", - "golang_native_histogram", - } - - for _, metric := range metrics { - metric := metric - t.Run(metric, func(t *testing.T) { - t.Parallel() - if metric == "golang_native_histogram" { - assertHistogramData(t, metricQuery(metric), metric) - } else { - assertMetricData(t, metricQuery(metric), metric) - } - }) - } -} - -func assertHistogramData(t *testing.T, query string, expectedMetric string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, "scrape_prom_metrics") - if assert.NotNil(c, metricResponse.Data.Result[0].Histogram) { - histogram := metricResponse.Data.Result[0].Histogram - if assert.NotEmpty(c, histogram.Data.Count) { - count, _ := strconv.Atoi(histogram.Data.Count) - assert.Greater(c, count, 10, "Count should be at some point greater than 10.") - } - if assert.NotEmpty(c, histogram.Data.Sum) { - sum, _ := strconv.ParseFloat(histogram.Data.Sum, 64) - assert.Greater(c, sum, 10., "Sum should be at some point greater than 10.") - } - assert.NotEmpty(c, histogram.Data.Buckets) - assert.Nil(c, metricResponse.Data.Result[0].Value) - } - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Histogram data did not satisfy the conditions within the time limit") -} - -func assertMetricData(t *testing.T, query, expectedMetric string) { - var metricResponse common.MetricResponse - assert.EventuallyWithT(t, func(c *assert.CollectT) { - err := common.FetchDataFromURL(query, &metricResponse) - assert.NoError(c, err) - if assert.NotEmpty(c, metricResponse.Data.Result) { - assert.Equal(c, metricResponse.Data.Result[0].Metric.Name, expectedMetric) - assert.Equal(c, metricResponse.Data.Result[0].Metric.TestName, "scrape_prom_metrics") - assert.NotEmpty(c, metricResponse.Data.Result[0].Value.Value) - assert.Nil(c, metricResponse.Data.Result[0].Histogram) - } - }, common.DefaultTimeout, common.DefaultRetryInterval, "Data did not satisfy the conditions within the time limit") + common.MimirMetricsTest(t, common.PromDefaultMetrics, common.PromDefaultHistogramMetric, "scrape_prom_metrics") } diff --git a/component/module/git/internal/vcs/auth.go b/internal/vcs/auth.go similarity index 100% rename from component/module/git/internal/vcs/auth.go rename to internal/vcs/auth.go diff --git a/component/module/git/internal/vcs/errors.go b/internal/vcs/errors.go similarity index 100% rename from component/module/git/internal/vcs/errors.go rename to internal/vcs/errors.go diff --git a/component/module/git/internal/vcs/git.go b/internal/vcs/git.go similarity index 100% rename from component/module/git/internal/vcs/git.go rename to internal/vcs/git.go diff --git a/component/module/git/internal/vcs/git_test.go b/internal/vcs/git_test.go similarity index 97% rename from component/module/git/internal/vcs/git_test.go rename to internal/vcs/git_test.go index 7680c857db0e..a7614eb9507f 100644 --- a/component/module/git/internal/vcs/git_test.go +++ b/internal/vcs/git_test.go @@ -6,7 +6,7 @@ import ( "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" - "github.com/grafana/agent/component/module/git/internal/vcs" + "github.com/grafana/agent/internal/vcs" "github.com/stretchr/testify/require" ) 
diff --git a/operations/agent-flow-mixin/alerts/clustering.libsonnet b/operations/agent-flow-mixin/alerts/clustering.libsonnet index bfa14f1f1e34..b4d5edc98988 100644 --- a/operations/agent-flow-mixin/alerts/clustering.libsonnet +++ b/operations/agent-flow-mixin/alerts/clustering.libsonnet @@ -8,7 +8,7 @@ alert.newGroup( 'ClusterNotConverging', 'stddev by (cluster, namespace) (sum without (state) (cluster_node_peers)) != 0', 'Cluster is not converging.', - '5m', + '10m', ), // Cluster has entered a split brain state. @@ -23,15 +23,7 @@ alert.newGroup( count by (cluster, namespace) (cluster_node_info) |||, 'Cluster nodes have entered a split brain state.', - '5m', - ), - - // Standard Deviation of Lamport clock time between nodes is too high. - alert.newRule( - 'ClusterLamportClockDrift', - 'stddev by (cluster, namespace) (cluster_node_lamport_time) > 4 * sqrt(count by (cluster, namespace) (cluster_node_info))', - "Cluster nodes' lamport clocks are not converging.", - '5m' + '10m', ), // Nodes health score is not zero. @@ -41,22 +33,7 @@ alert.newGroup( cluster_node_gossip_health_score > 0 |||, 'Cluster node is reporting a health score > 0.', - '5m', - ), - - // Lamport clock of a node is not progressing at all. - // - // This only checks for nodes that have peers other than themselves; nodes - // with no external peers will not increase their lamport time because - // there is no cluster networking traffic. - alert.newRule( - 'ClusterLamportClockStuck', - ||| - sum by (cluster, namespace, instance) (rate(cluster_node_lamport_time[2m])) == 0 - and on (cluster, namespace, instance) (cluster_node_peers > 1) - |||, - "Cluster nodes's lamport clocks is not progressing.", - '5m', + '10m', ), // Node tried to join the cluster with an already-present node name. @@ -72,7 +49,7 @@ alert.newGroup( 'ClusterNodeStuckTerminating', 'sum by (cluster, namespace, instance) (cluster_node_peers{state="terminating"}) > 0', 'Cluster node stuck in Terminating state.', - '5m', + '10m', ), // Nodes are not using the same configuration file. @@ -86,8 +63,5 @@ alert.newGroup( 'Cluster nodes are not using the same configuration file.', '5m', ), - - // TODO(@tpaschalis) Alert on open transport streams once we investigate - // their behavior. 
] ) diff --git a/operations/agent-flow-mixin/dashboards.libsonnet b/operations/agent-flow-mixin/dashboards.libsonnet index 7ecf696bf38f..661de183dc96 100644 --- a/operations/agent-flow-mixin/dashboards.libsonnet +++ b/operations/agent-flow-mixin/dashboards.libsonnet @@ -2,7 +2,8 @@ grafanaDashboards+: (import './dashboards/controller.libsonnet') + (import './dashboards/resources.libsonnet') + - (import './dashboards/prometheus.remote_write.libsonnet') + + (import './dashboards/prometheus.libsonnet') + (import './dashboards/cluster-node.libsonnet') + + (import './dashboards/opentelemetry.libsonnet') + (import './dashboards/cluster-overview.libsonnet'), } diff --git a/operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet b/operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet new file mode 100644 index 000000000000..a88fdf3893ff --- /dev/null +++ b/operations/agent-flow-mixin/dashboards/opentelemetry.libsonnet @@ -0,0 +1,174 @@ +local dashboard = import './utils/dashboard.jsonnet'; +local panel = import './utils/panel.jsonnet'; +local filename = 'agent-flow-opentelemetry.json'; + +local stackedPanelMixin = { + fieldConfig+: { + defaults+: { + custom+: { + fillOpacity: 20, + gradientMode: 'hue', + stacking: { mode: 'normal' }, + }, + }, + }, +}; + +{ + [filename]: + dashboard.new(name='Grafana Agent Flow / OpenTelemetry') + + dashboard.withDashboardsLink() + + dashboard.withUID(std.md5(filename)) + + dashboard.withTemplateVariablesMixin([ + dashboard.newTemplateVariable('cluster', ||| + label_values(agent_component_controller_running_components, cluster) + |||), + dashboard.newTemplateVariable('namespace', ||| + label_values(agent_component_controller_running_components{cluster="$cluster"}, namespace) + |||), + dashboard.newMultiTemplateVariable('instance', ||| + label_values(agent_component_controller_running_components{cluster="$cluster", namespace="$namespace"}, instance) + |||), + ]) + + dashboard.withPanelsMixin([ + // "Receivers for traces" row + ( + panel.new('Receivers for traces [otelcol.receiver]', 'row') + + panel.withPosition({ h: 1, w: 24, x: 0, y: 0 }) + ), + ( + panel.new(title='Accepted spans', type='timeseries') + + panel.withDescription(||| + Number of spans successfully pushed into the pipeline. + |||) + + panel.withPosition({ x: 0, y: 0, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(receiver_accepted_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + //TODO: How will the dashboard look if there is more than one receiver component? The legend is not unique enough? + legendFormat='{{ pod }} / {{ transport }}', + ), + ]) + ), + ( + panel.new(title='Refused spans', type='timeseries') + + stackedPanelMixin + + panel.withDescription(||| + Number of spans that could not be pushed into the pipeline. + |||) + + panel.withPosition({ x: 8, y: 0, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(receiver_refused_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }} / {{ transport }}', + ), + ]) + ), + ( + panel.newHeatmap('RPC server duration (traces)') + + panel.withUnit('milliseconds') + + panel.withDescription(||| + The duration of inbound RPCs. 
+ |||) + + panel.withPosition({ x: 16, y: 0, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr='sum by (le) (increase(rpc_server_duration_milliseconds_bucket{cluster="$cluster", namespace="$namespace", instance=~"$instance", rpc_service="opentelemetry.proto.collector.trace.v1.TraceService"}[$__rate_interval]))', + format='heatmap', + legendFormat='{{le}}', + ), + ]) + ), + + // "Batching" row + ( + panel.new('Batching [otelcol.processor.batch]', 'row') + + panel.withPosition({ h: 1, w: 24, x: 0, y: 10 }) + ), + ( + panel.newHeatmap('Number of units in the batch') + + panel.withDescription(||| + Number of units in the batch + |||) + + panel.withPosition({ x: 0, y: 10, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr='sum by (le) (increase(processor_batch_batch_send_size_ratio_bucket{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]))', + format='heatmap', + legendFormat='{{le}}', + ), + ]) + ), + ( + panel.new(title='Distinct metadata values', type='timeseries') + + panel.withDescription(||| + Number of distinct metadata value combinations being processed + |||) + + panel.withPosition({ x: 8, y: 10, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + processor_batch_metadata_cardinality_ratio{cluster="$cluster", namespace="$namespace", instance=~"$instance"} + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + ( + panel.new(title='Timeout trigger', type='timeseries') + + panel.withDescription(||| + Number of times the batch was sent due to a timeout trigger + |||) + + panel.withPosition({ x: 16, y: 10, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(processor_batch_timeout_trigger_send_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + + // "Exporters for traces" row + ( + panel.new('Exporters for traces [otelcol.exporter]', 'row') + + panel.withPosition({ h: 1, w: 24, x: 0, y: 20 }) + ), + ( + panel.new(title='Exported sent spans', type='timeseries') + + panel.withDescription(||| + Number of spans successfully sent to destination. + |||) + + panel.withPosition({ x: 0, y: 20, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + rate(exporter_sent_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + ( + panel.new(title='Exported failed spans', type='timeseries') + + panel.withDescription(||| + Number of spans in failed attempts to send to destination. 
+ |||) + panel.withPosition({ x: 8, y: 20, w: 8, h: 10 }) + panel.withQueries([ + panel.newQuery( + expr=||| + rate(exporter_send_failed_spans_ratio_total{cluster="$cluster", namespace="$namespace", instance=~"$instance"}[$__rate_interval]) + |||, + legendFormat='{{ pod }}', + ), + ]) + ), + + ]), +} diff --git a/operations/agent-flow-mixin/dashboards/prometheus.libsonnet b/operations/agent-flow-mixin/dashboards/prometheus.libsonnet new file mode 100644 index 000000000000..21ae79f3b063 --- /dev/null +++ b/operations/agent-flow-mixin/dashboards/prometheus.libsonnet @@ -0,0 +1,426 @@ +local dashboard = import './utils/dashboard.jsonnet'; +local panel = import './utils/panel.jsonnet'; +local filename = 'agent-flow-prometheus-remote-write.json'; + +local stackedPanelMixin = { + fieldConfig+: { + defaults+: { + custom+: { + fillOpacity: 20, + gradientMode: 'hue', + stacking: { mode: 'normal' }, + }, + }, + }, +}; + +local scrapePanels(y_offset) = [ + panel.newRow(title='prometheus.scrape', y=y_offset), + + // Scrape success rate + ( + panel.new(title='Scrape success rate in $cluster', type='timeseries') + + panel.withUnit('percentunit') + + panel.withDescription(||| + Percentage of targets successfully scraped by prometheus.scrape + components. + + This metric is calculated by dividing the number of targets + successfully scraped by the total number of scrape targets, + across all the namespaces in the selected cluster. + + Low success rates can indicate a problem with scrape targets, + stale service discovery, or agent misconfiguration. + |||) + + panel.withPosition({ x: 0, y: 1 + y_offset, w: 12, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum(up{cluster="$cluster"}) + / + count (up{cluster="$cluster"}) + |||, + legendFormat='% of targets successfully scraped', + ), + ]) + ), + + // Scrape duration + ( + panel.new(title='Scrape duration in $cluster', type='timeseries') + + panel.withUnit('s') + + panel.withDescription(||| + Duration of successful scrapes by prometheus.scrape components, + across all the namespaces in the selected cluster. + + This metric should be below your configured scrape interval. + High durations can indicate a problem with a scrape target or + a performance issue with the agent. + |||) + + panel.withPosition({ x: 12, y: 1 + y_offset, w: 12, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + quantile(0.99, scrape_duration_seconds{cluster="$cluster"}) + |||, + legendFormat='p99', + ), + panel.newQuery( + expr=||| + quantile(0.95, scrape_duration_seconds{cluster="$cluster"}) + |||, + legendFormat='p95', + ), + panel.newQuery( + expr=||| + quantile(0.50, scrape_duration_seconds{cluster="$cluster"}) + |||, + legendFormat='p50', + ), + + ]) + ), +]; + +local remoteWritePanels(y_offset) = [ + panel.newRow(title='prometheus.remote_write', y=y_offset), + + // WAL delay + ( + panel.new(title='WAL delay', type='timeseries') + + panel.withUnit('s') + + panel.withDescription(||| + How far behind prometheus.remote_write is from samples recently written + to the WAL. + + Each endpoint that prometheus.remote_write is configured to send metrics to + has its own delay. The time shown here is the sum across all + endpoints for the given component. + + It is normal for the WAL delay to be within 1-3 scrape intervals. If + the WAL delay continues to increase beyond that amount, try + increasing the number of maximum shards.
+ |||) + + panel.withPosition({ x: 0, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum by (instance, component_id) ( + prometheus_remote_storage_highest_timestamp_in_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"} + - ignoring(url, remote_name) group_right(instance) + prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Data write throughput + ( + panel.new(title='Data write throughput', type='timeseries') + + stackedPanelMixin + + panel.withUnit('Bps') + + panel.withDescription(||| + Rate of data containing samples and metadata sent by + prometheus.remote_write. + |||) + + panel.withPosition({ x: 6, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (remote_name, url) ( + rate(prometheus_remote_storage_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + + rate(prometheus_remote_storage_metadata_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Write latency + ( + panel.new(title='Write latency', type='timeseries') + + panel.withUnit('s') + + panel.withDescription(||| + Latency of writes to the remote system made by + prometheus.remote_write. + |||) + + panel.withPosition({ x: 12, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + histogram_quantile(0.99, sum by (le) ( + rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + )) + |||, + legendFormat='99th percentile', + ), + panel.newQuery( + expr=||| + histogram_quantile(0.50, sum by (le) ( + rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + )) + |||, + legendFormat='50th percentile', + ), + panel.newQuery( + expr=||| + sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) / + sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) + |||, + legendFormat='Average', + ), + ]) + ), + + // Shards + ( + local minMaxOverride = { + properties: [{ + id: 'custom.lineStyle', + value: { + dash: [10, 15], + fill: 'dash', + }, + }, { + id: 'custom.showPoints', + value: 'never', + }, { + id: 'custom.hideFrom', + value: { + legend: true, + tooltip: false, + viz: false, + }, + }], + }; + + panel.new(title='Shards', type='timeseries') { + fieldConfig+: { + overrides: [ + minMaxOverride { matcher: { id: 'byName', options: 'Minimum' } }, + minMaxOverride { matcher: { id: 'byName', options: 'Maximum' } }, + ], + }, + } + + panel.withUnit('none') + + panel.withDescription(||| + Total number of shards which are concurrently sending samples read + from the Write-Ahead Log. 
+ + Shards are bound to a minimum and maximum, displayed on the graph. + The lowest minimum and the highest maximum across all clients are + shown. + + Each client has its own set of shards, minimum shards, and maximum + shards; filter to a specific URL to display more granular + information. + |||) + + panel.withPosition({ x: 18, y: 1 + y_offset, w: 6, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (remote_name, url) ( + prometheus_remote_storage_shards{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + panel.newQuery( + expr=||| + min ( + prometheus_remote_storage_shards_min{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='Minimum', + ), + panel.newQuery( + expr=||| + max ( + prometheus_remote_storage_shards_max{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} + ) + |||, + legendFormat='Maximum', + ), + ]) + ), + + // Sent samples / second + ( + panel.new(title='Sent samples / second', type='timeseries') + + stackedPanelMixin + + panel.withUnit('cps') + + panel.withDescription(||| + Rate of outgoing samples sent by prometheus.remote_write. + |||) + + panel.withPosition({ x: 0, y: 11 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (url, remote_name) ( + rate(prometheus_remote_storage_samples_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Failed samples / second + ( + panel.new(title='Failed samples / second', type='timeseries') + + stackedPanelMixin + + panel.withUnit('cps') + + panel.withDescription(||| + Rate of samples which prometheus.remote_write could not send due to + non-recoverable errors. + |||) + + panel.withPosition({ x: 8, y: 11 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (url,remote_name) ( + rate(prometheus_remote_storage_samples_failed_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Retried samples / second + ( + panel.new(title='Retried samples / second', type='timeseries') + + stackedPanelMixin + + panel.withUnit('cps') + + panel.withDescription(||| + Rate of samples which prometheus.remote_write attempted to resend + after receiving a recoverable error. + |||) + + panel.withPosition({ x: 16, y: 11 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum without (url,remote_name) ( + rate(prometheus_remote_storage_samples_retried_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + ) + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Active series (Total) + ( + panel.new(title='Active series (total)', type='timeseries') { + options+: { + legend+: { + showLegend: false, + }, + }, + } + + panel.withUnit('short') + + panel.withDescription(||| + Total number of active series across all components. + + An "active series" is a series that prometheus.remote_write recently + received a sample for.
Active series are garbage collected whenever a + truncation of the WAL occurs. + |||) + + panel.withPosition({ x: 0, y: 21 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum(agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}) + |||, + legendFormat='Series', + ), + ]) + ), + + // Active series (by instance/component) + ( + panel.new(title='Active series (by instance/component)', type='timeseries') + + panel.withUnit('short') + + panel.withDescription(||| + Total number of active series which are currently being tracked by + prometheus.remote_write components, with separate lines for each agent instance. + + An "active series" is a series that prometheus.remote_write recently + received a sample for. Active series are garbage collected whenever a + truncation of the WAL occurs. + |||) + + panel.withPosition({ x: 8, y: 21 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"} + |||, + legendFormat='{{instance}} / {{component_id}}', + ), + ]) + ), + + // Active series (by component) + ( + panel.new(title='Active series (by component)', type='timeseries') + + panel.withUnit('short') + + panel.withDescription(||| + Total number of active series which are currently being tracked by + prometheus.remote_write components, aggregated across all instances. + + An "active series" is a series that prometheus.remote_write recently + received a sample for. Active series are garbage collected whenever a + truncation of the WAL occurs. + |||) + + panel.withPosition({ x: 16, y: 21 + y_offset, w: 8, h: 10 }) + + panel.withQueries([ + panel.newQuery( + expr=||| + sum by (component_id) (agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"}) + |||, + legendFormat='{{component_id}}', + ), + ]) + ), +]; + +{ + [filename]: + dashboard.new(name='Grafana Agent Flow / Prometheus Components') + + dashboard.withDocsLink( + url='https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/', + desc='Component documentation', + ) + + dashboard.withDashboardsLink() + + dashboard.withUID(std.md5(filename)) + + dashboard.withTemplateVariablesMixin([ + dashboard.newTemplateVariable('cluster', ||| + label_values(agent_component_controller_running_components, cluster) + |||), + dashboard.newTemplateVariable('namespace', ||| + label_values(agent_component_controller_running_components{cluster="$cluster"}, namespace) + |||), + dashboard.newMultiTemplateVariable('instance', ||| + label_values(agent_component_controller_running_components{cluster="$cluster", namespace="$namespace"}, instance) + |||), + dashboard.newMultiTemplateVariable('component', ||| + label_values(agent_wal_samples_appended_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"prometheus\\.remote_write\\..*"}, component_id) + |||), + dashboard.newMultiTemplateVariable('url', ||| + label_values(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"}, url) + |||), + ]) + + // TODO(@tpaschalis) Make the annotation optional. 
+ dashboard.withAnnotations([ + dashboard.newLokiAnnotation('Deployments', '{cluster="$cluster", container="kube-diff-logger"} | json | namespace_extracted="grafana-agent" | name_extracted=~"grafana-agent.*"', 'rgba(0, 211, 255, 1)'), + ]) + + dashboard.withPanelsMixin( + // First row, offset is 0 + scrapePanels(y_offset=0) + + // Scrape panels take 11 units, so offset next row by 11. + remoteWritePanels(y_offset=11) + ), +} diff --git a/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet b/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet deleted file mode 100644 index d9e0d8d170b4..000000000000 --- a/operations/agent-flow-mixin/dashboards/prometheus.remote_write.libsonnet +++ /dev/null @@ -1,352 +0,0 @@ -local dashboard = import './utils/dashboard.jsonnet'; -local panel = import './utils/panel.jsonnet'; -local filename = 'agent-flow-prometheus-remote-write.json'; - -local stackedPanelMixin = { - fieldConfig+: { - defaults+: { - custom+: { - fillOpacity: 20, - gradientMode: 'hue', - stacking: { mode: 'normal' }, - }, - }, - }, -}; - -{ - [filename]: - dashboard.new(name='Grafana Agent Flow / prometheus.remote_write') + - dashboard.withDocsLink( - url='https://grafana.com/docs/agent/latest/flow/reference/components/prometheus.remote_write/', - desc='Component documentation', - ) + - dashboard.withDashboardsLink() + - dashboard.withUID(std.md5(filename)) + - dashboard.withTemplateVariablesMixin([ - dashboard.newTemplateVariable('cluster', ||| - label_values(agent_component_controller_running_components, cluster) - |||), - dashboard.newTemplateVariable('namespace', ||| - label_values(agent_component_controller_running_components{cluster="$cluster"}, namespace) - |||), - dashboard.newMultiTemplateVariable('instance', ||| - label_values(agent_component_controller_running_components{cluster="$cluster", namespace="$namespace"}, instance) - |||), - dashboard.newMultiTemplateVariable('component', ||| - label_values(agent_wal_samples_appended_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"prometheus\\.remote_write\\..*"}, component_id) - |||), - dashboard.newMultiTemplateVariable('url', ||| - label_values(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"}, url) - |||), - ]) + - // TODO(@tpaschalis) Make the annotation optional. - dashboard.withAnnotations([ - dashboard.newLokiAnnotation('Deployments', '{cluster="$cluster", container="kube-diff-logger"} | json | namespace_extracted="grafana-agent" | name_extracted=~"grafana-agent.*"', 'rgba(0, 211, 255, 1)'), - ]) + - dashboard.withPanelsMixin([ - // WAL delay - ( - panel.new(title='WAL delay', type='timeseries') + - panel.withUnit('s') + - panel.withDescription(||| - How far behind prometheus.remote_write from samples recently written - to the WAL. - - Each endpoint prometheus.remote_write is configured to send metrics - has its own delay. The time shown here is the sum across all - endpoints for the given component. - - It is normal for the WAL delay to be within 1-3 scrape intervals. If - the WAL delay continues to increase beyond that amount, try - increasing the number of maximum shards. 
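The new dashboard derives its UID with `dashboard.withUID(std.md5(filename))`: hashing the output filename yields a UID that is deterministic across regenerations of the mixin, so existing links and bookmarks keep resolving. A minimal Go sketch of that derivation; the filename shown is the one declared in the deleted libsonnet and is illustrative only:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// dashboardUID mirrors the jsonnet expression `dashboard.withUID(std.md5(filename))`:
// the UID is the lowercase hex MD5 of the dashboard's output filename, so it is
// stable for a given filename and survives regeneration of the mixin.
func dashboardUID(filename string) string {
	sum := md5.Sum([]byte(filename))
	return hex.EncodeToString(sum[:])
}

func main() {
	// Filename taken from the deleted libsonnet; illustrative only.
	fmt.Println(dashboardUID("agent-flow-prometheus-remote-write.json"))
}
```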
- |||) + - panel.withPosition({ x: 0, y: 0, w: 6, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum by (instance, component_id) ( - prometheus_remote_storage_highest_timestamp_in_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component"} - - ignoring(url, remote_name) group_right(instance) - prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} - ) - |||, - legendFormat='{{instance}} / {{component_id}}', - ), - ]) - ), - - // Data write throughput - ( - panel.new(title='Data write throughput', type='timeseries') + - stackedPanelMixin + - panel.withUnit('Bps') + - panel.withDescription(||| - Rate of data containing samples and metadata sent by - prometheus.remote_write. - |||) + - panel.withPosition({ x: 6, y: 0, w: 6, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum without (remote_name, url) ( - rate(prometheus_remote_storage_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) + - rate(prometheus_remote_storage_metadata_bytes_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) - ) - |||, - legendFormat='{{instance}} / {{component_id}}', - ), - ]) - ), - - // Write latency - ( - panel.new(title='Write latency', type='timeseries') + - panel.withUnit('s') + - panel.withDescription(||| - Latency of writes to the remote system made by - prometheus.remote_write. - |||) + - panel.withPosition({ x: 12, y: 0, w: 6, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - histogram_quantile(0.99, sum by (le) ( - rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) - )) - |||, - legendFormat='99th percentile', - ), - panel.newQuery( - expr=||| - histogram_quantile(0.50, sum by (le) ( - rate(prometheus_remote_storage_sent_batch_duration_seconds_bucket{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) - )) - |||, - legendFormat='50th percentile', - ), - panel.newQuery( - expr=||| - sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_sum{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) / - sum(rate(prometheus_remote_storage_sent_batch_duration_seconds_count{cluster="$cluster",namespace="$namespace",instance=~"$instance", component_id=~"$component"}[$__rate_interval])) - |||, - legendFormat='Average', - ), - ]) - ), - - // Shards - ( - local minMaxOverride = { - properties: [{ - id: 'custom.lineStyle', - value: { - dash: [10, 15], - fill: 'dash', - }, - }, { - id: 'custom.showPoints', - value: 'never', - }, { - id: 'custom.hideFrom', - value: { - legend: true, - tooltip: false, - viz: false, - }, - }], - }; - - panel.new(title='Shards', type='timeseries') { - fieldConfig+: { - overrides: [ - minMaxOverride { matcher: { id: 'byName', options: 'Minimum' } }, - minMaxOverride { matcher: { id: 'byName', options: 'Maximum' } }, - ], - }, - } + - panel.withUnit('none') + - panel.withDescription(||| - Total number of shards which are concurrently sending samples read - from the Write-Ahead Log. 
- - Shards are bound to a minimum and maximum, displayed on the graph. - The lowest minimum and the highest maximum across all clients is - shown. - - Each client has its own set of shards, minimum shards, and maximum - shards; filter to a specific URL to display more granular - information. - |||) + - panel.withPosition({ x: 18, y: 0, w: 6, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum without (remote_name, url) ( - prometheus_remote_storage_shards{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} - ) - |||, - legendFormat='{{instance}} / {{component_id}}', - ), - panel.newQuery( - expr=||| - min ( - prometheus_remote_storage_shards_min{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} - ) - |||, - legendFormat='Minimum', - ), - panel.newQuery( - expr=||| - max ( - prometheus_remote_storage_shards_max{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"} - ) - |||, - legendFormat='Maximum', - ), - ]) - ), - - - // Sent samples / second - ( - panel.new(title='Sent samples / second', type='timeseries') + - stackedPanelMixin + - panel.withUnit('cps') + - panel.withDescription(||| - Total outgoing samples sent by prometheus.remote_write. - |||) + - panel.withPosition({ x: 0, y: 10, w: 8, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum without (url, remote_name) ( - rate(prometheus_remote_storage_samples_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) - ) - |||, - legendFormat='{{instance}} / {{component_id}}', - ), - ]) - ), - - // Failed samples / second - ( - panel.new(title='Failed samples / second', type='timeseries') + - stackedPanelMixin + - panel.withUnit('cps') + - panel.withDescription(||| - Rate of samples which prometheus.remote_write could not send due to - non-recoverable errors. - |||) + - panel.withPosition({ x: 8, y: 10, w: 8, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum without (url,remote_name) ( - rate(prometheus_remote_storage_samples_failed_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) - ) - |||, - legendFormat='{{instance}} / {{component_id}}', - ), - ]) - ), - - // Retried samples / second - ( - panel.new(title='Retried samples / second', type='timeseries') + - stackedPanelMixin + - panel.withUnit('cps') + - panel.withDescription(||| - Rate of samples which prometheus.remote_write attempted to resend - after receiving a recoverable error. - |||) + - panel.withPosition({ x: 16, y: 10, w: 8, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum without (url,remote_name) ( - rate(prometheus_remote_storage_samples_retried_total{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}[$__rate_interval]) - ) - |||, - legendFormat='{{instance}} / {{component_id}}', - ), - ]) - ), - - // Active series (Total) - ( - panel.new(title='Active series (total)', type='timeseries') { - options+: { - legend+: { - showLegend: false, - }, - }, - } + - panel.withUnit('short') + - panel.withDescription(||| - Total number of active series across all components. - - An "active series" is a series that prometheus.remote_write recently - received a sample for. 
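The Shards panel description states that shards are bound to a configured minimum and maximum, with the dashed `Minimum`/`Maximum` override series making those bounds visible on the graph. The bound itself is a plain clamp; the schematic Go sketch below shows only that relationship and is not Prometheus's actual remote_write resharding logic, which also weighs observed throughput:

```go
package main

import "fmt"

// clampShards is a schematic sketch of the bound described by the Shards panel:
// whatever shard count the resharding calculation asks for, the effective value
// stays inside the configured [minShards, maxShards] window. This is NOT the
// actual Prometheus remote_write resharding algorithm.
func clampShards(desired, minShards, maxShards int) int {
	if desired < minShards {
		return minShards
	}
	if desired > maxShards {
		return maxShards
	}
	return desired
}

func main() {
	fmt.Println(clampShards(73, 1, 50)) // 50: capped at the maximum
	fmt.Println(clampShards(0, 1, 50))  // 1: raised to the minimum
}
```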
Active series are garbage collected whenever a - truncation of the WAL occurs. - |||) + - panel.withPosition({ x: 0, y: 20, w: 8, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum(agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id=~"$component", url=~"$url"}) - |||, - legendFormat='Series', - ), - ]) - ), - - // Active series (by instance/component) - ( - panel.new(title='Active series (by instance/component)', type='timeseries') + - panel.withUnit('short') + - panel.withDescription(||| - Total number of active series which are currently being tracked by - prometheus.remote_write components, with separate lines for each agent instance. - - An "active series" is a series that prometheus.remote_write recently - received a sample for. Active series are garbage collected whenever a - truncation of the WAL occurs. - |||) + - panel.withPosition({ x: 8, y: 20, w: 8, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"} - |||, - legendFormat='{{instance}} / {{component_id}}', - ), - ]) - ), - - // Active series (by component) - ( - panel.new(title='Active series (by component)', type='timeseries') + - panel.withUnit('short') + - panel.withDescription(||| - Total number of active series which are currently being tracked by - prometheus.remote_write components, aggregated across all instances. - - An "active series" is a series that prometheus.remote_write recently - received a sample for. Active series are garbage collected whenever a - truncation of the WAL occurs. - |||) + - panel.withPosition({ x: 16, y: 20, w: 8, h: 10 }) + - panel.withQueries([ - panel.newQuery( - expr=||| - sum by (component_id) (agent_wal_storage_active_series{cluster="$cluster", namespace="$namespace", instance=~"$instance", component_id!="", component_id=~"$component", url=~"$url"}) - |||, - legendFormat='{{component_id}}', - ), - ]) - ), - - - ]), -} diff --git a/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet b/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet index 43dedd3b16b3..63427a46c95d 100644 --- a/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet +++ b/operations/agent-flow-mixin/dashboards/utils/dashboard.jsonnet @@ -7,6 +7,7 @@ timezone: 'utc', refresh: '10s', schemaVersion: 36, + graphTooltip: 1, // shared crosshair for all graphs tags: ['grafana-agent-flow-mixin'], templating: { list: [{ diff --git a/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet b/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet index 929af4699a93..93f7260f0923 100644 --- a/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet +++ b/operations/agent-flow-mixin/dashboards/utils/panel.jsonnet @@ -127,4 +127,9 @@ refId: refId, } ), + + newRow(title='', x=0, y=0, w=24, h=1, collapsed=false):: + $.new(title, 'row') + + $.withPosition({x: x, y: y, w: w, h: h }) + + {collapsed: collapsed}, } diff --git a/operations/helm/charts/grafana-agent/CHANGELOG.md b/operations/helm/charts/grafana-agent/CHANGELOG.md index b36d664b4257..f6b9caf9aedb 100644 --- a/operations/helm/charts/grafana-agent/CHANGELOG.md +++ b/operations/helm/charts/grafana-agent/CHANGELOG.md @@ -10,12 +10,17 @@ internal API changes are not present. 
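The `newRow` helper added to panel.jsonnet composes a full-width (w=24), one-unit-tall panel of type `row` with a `collapsed` flag, which is what lets the reworked dashboard group panels into rows. Assuming `withPosition` maps onto Grafana's `gridPos` field, the JSON it renders to looks roughly like the output of this Go sketch (the row title here is hypothetical; `newRow` defaults to an empty title):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// GridPos models Grafana's panel placement object; withPosition in the jsonnet
// helpers is assumed to populate this field.
type GridPos struct {
	X int `json:"x"`
	Y int `json:"y"`
	W int `json:"w"`
	H int `json:"h"`
}

// RowPanel approximates what newRow(title, x, y, w, h, collapsed) emits: a
// panel of type "row" spanning the full 24-unit grid width with height 1.
type RowPanel struct {
	Title     string  `json:"title"`
	Type      string  `json:"type"`
	GridPos   GridPos `json:"gridPos"`
	Collapsed bool    `json:"collapsed"`
}

func main() {
	// Hypothetical row title, for illustration only.
	row := RowPanel{Title: "prometheus.scrape", Type: "row", GridPos: GridPos{X: 0, Y: 0, W: 24, H: 1}}
	out, err := json.MarshalIndent(row, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```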
Unreleased ---------- -### Enhancments +0.31.1 (2024-01-19) +------------------- + +### Enhancements - Add `kubectl.kubernetes.io/default-container: grafana-agent` annotation to allow various tools to choose `grafana-agent` container as default target (@aerfio) - Add support for topology spread constraints in helm chart. (@etiennep) +- Update Grafana Agent version to v0.39.1. (@marctc) + ### Bugfixes - Fix a bug preventing the `.Values.configReloader.image.digest` Helm value to be correctly retrieved. (@claudioscalzo) diff --git a/operations/helm/charts/grafana-agent/Chart.yaml b/operations/helm/charts/grafana-agent/Chart.yaml index 992503befc2a..76675dcf65a7 100644 --- a/operations/helm/charts/grafana-agent/Chart.yaml +++ b/operations/helm/charts/grafana-agent/Chart.yaml @@ -2,8 +2,8 @@ apiVersion: v2 name: grafana-agent description: 'Grafana Agent' type: application -version: 0.31.0 -appVersion: 'v0.39.0' +version: 0.31.1 +appVersion: 'v0.39.1' dependencies: - name: crds diff --git a/operations/helm/charts/grafana-agent/README.md b/operations/helm/charts/grafana-agent/README.md index a84194be1268..4904736239cc 100644 --- a/operations/helm/charts/grafana-agent/README.md +++ b/operations/helm/charts/grafana-agent/README.md @@ -1,6 +1,6 @@ # Grafana Agent Helm chart -![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.31.0](https://img.shields.io/badge/Version-0.31.0-informational?style=flat-square) ![AppVersion: v0.39.0](https://img.shields.io/badge/AppVersion-v0.39.0-informational?style=flat-square) +![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![Version: 0.31.1](https://img.shields.io/badge/Version-0.31.1-informational?style=flat-square) ![AppVersion: v0.39.1](https://img.shields.io/badge/AppVersion-v0.39.1-informational?style=flat-square) Helm chart for deploying [Grafana Agent][] to Kubernetes. 
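The chart bump to version 0.31.1 / appVersion v0.39.1 is also why every golden manifest below changes: the rendered image tag is derived from the chart's `appVersion`, so moving it from v0.39.0 to v0.39.1 rewrites the `image:` line in each test fixture. A sketch of that derivation; the authoritative logic lives in the chart's Helm templates, and the function below is only an illustration:

```go
package main

import "fmt"

// imageRef illustrates how the rendered image line is assembled: registry and
// repository come from chart values, and the tag defaults to the chart's
// appVersion. The real logic is the chart's Helm templates; this is a sketch.
func imageRef(registry, repository, appVersion string) string {
	return fmt.Sprintf("%s/%s:%s", registry, repository, appVersion)
}

func main() {
	fmt.Println(imageRef("docker.io", "grafana/agent", "v0.39.1")) // default registry tests
	fmt.Println(imageRef("quay.io", "grafana/agent", "v0.39.1"))   // *-image-registry tests
}
```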
diff --git a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml index 5ae321c4d3be..ac5ccd4389b9 100644 --- a/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/additional-serviceaccount-label/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml index f87384dcc4be..fb64c8abfc9b 100644 --- a/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/clustering/grafana-agent/templates/controllers/statefulset.yaml @@ -30,7 +30,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml index a4b56503c5c3..2b36fc32980a 100644 --- a/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/controller-volumes-extra/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml index 6d1864c45cd8..252d5e276878 100644 --- a/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset-hostnetwork/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml index 5ae321c4d3be..ac5ccd4389b9 100644 --- a/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/create-daemonset/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml 
b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml index 212ec682982c..98397b6c00f4 100644 --- a/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment-autoscaling/grafana-agent/templates/controllers/deployment.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml index 8df5e687bb79..5ccee146ffef 100644 --- a/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/create-deployment/grafana-agent/templates/controllers/deployment.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml index 6b7d51402a87..132210c7c2e3 100644 --- a/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset-autoscaling/grafana-agent/templates/controllers/statefulset.yaml @@ -29,7 +29,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml index 120c40d0a6e0..6f7d11f76fa9 100644 --- a/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml +++ b/operations/helm/tests/create-statefulset/grafana-agent/templates/controllers/statefulset.yaml @@ -30,7 +30,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml index 5ae321c4d3be..ac5ccd4389b9 100644 --- a/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/custom-config/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml index 5ae321c4d3be..ac5ccd4389b9 100644 --- a/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml 
+++ b/operations/helm/tests/default-values/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml index 5ae321c4d3be..ac5ccd4389b9 100644 --- a/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/enable-servicemonitor/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml index 048642ef844e..85fb01959587 100644 --- a/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/envFrom/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml index d7ee284a42b9..b6a93df6f9f8 100644 --- a/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/existing-config/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml index a0b21a2e9804..4f45d0fc40ed 100644 --- a/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-env/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml index 9c245c05a471..c07733057e80 100644 --- a/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/extra-ports/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git 
a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml index ecef2c88c601..c955003a200e 100644 --- a/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/faro-ingress/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 00ce68be98d9..1f05afd30c17 100644 --- a/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -32,7 +32,7 @@ spec: - name: global-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 35e13217e564..0048873d187e 100644 --- a/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/global-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.39.0 + image: quay.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml index 75f3e5c7c233..0d58403356bb 100644 --- a/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/initcontainers/grafana-agent/templates/controllers/daemonset.yaml @@ -45,7 +45,7 @@ spec: name: geoip containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml index 072b18ff2dea..2ce6b7ad7a21 100644 --- a/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/local-image-pullsecrets/grafana-agent/templates/controllers/daemonset.yaml @@ -29,7 +29,7 @@ spec: - name: local-cred containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml index 35e13217e564..0048873d187e 100644 --- a/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml +++ 
b/operations/helm/tests/local-image-registry/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: quay.io/grafana/agent:v0.39.0 + image: quay.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml index 67f63b95e3ea..b4c896139945 100644 --- a/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/nodeselectors-and-tolerations/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml index 9cd956ceec05..8fb72ca6f523 100644 --- a/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/pod_annotations/grafana-agent/templates/controllers/daemonset.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml index dc9ae006b732..bb9b6a1ba64d 100644 --- a/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/sidecars/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - run diff --git a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml index 29dc3421bc44..f676d9f3e046 100644 --- a/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml +++ b/operations/helm/tests/static-mode/grafana-agent/templates/controllers/daemonset.yaml @@ -27,7 +27,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 imagePullPolicy: IfNotPresent args: - -config.file=/etc/agent/config.yaml diff --git a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml index 775d06b23d9c..b28114e09e4d 100644 --- a/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml +++ b/operations/helm/tests/topologyspreadconstraints/grafana-agent/templates/controllers/deployment.yaml @@ -28,7 +28,7 @@ spec: serviceAccountName: grafana-agent containers: - name: grafana-agent - image: docker.io/grafana/agent:v0.39.0 + image: docker.io/grafana/agent:v0.39.1 
  imagePullPolicy: IfNotPresent
  args:
    - run
diff --git a/packaging/grafana-agent-flow/windows/install_script.nsis b/packaging/grafana-agent-flow/windows/install_script.nsis
index a333807c1196..469a2cbd97b0 100644
--- a/packaging/grafana-agent-flow/windows/install_script.nsis
+++ b/packaging/grafana-agent-flow/windows/install_script.nsis
@@ -152,10 +152,14 @@ Function InitializeRegistry
     Pop $0 # Ignore return result
   ${EndIf}

-  # Define the environment key, which holds environment variables to pass to the
-  # service.
-  nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Environment /t REG_MULTI_SZ /d "$Environment"'
-  Pop $0 # Ignore return result
+  nsExec::ExecToLog 'Reg.exe query "${REGKEY}" /reg:64 /v Environment'
+  Pop $0
+  ${If} $0 == 1
+    # Define the environment key, which holds environment variables to pass to the
+    # service.
+    nsExec::ExecToLog 'Reg.exe add "${REGKEY}" /reg:64 /v Environment /t REG_MULTI_SZ /d "$Environment"'
+    Pop $0 # Ignore return result
+  ${EndIf}

   Return
 FunctionEnd
diff --git a/pkg/flow/flow.go b/pkg/flow/flow.go
index 76d62bdb9cb2..6e839abda36f 100644
--- a/pkg/flow/flow.go
+++ b/pkg/flow/flow.go
@@ -27,21 +27,21 @@
 // when evaluating the configuration for a component will always be reported as
 // unhealthy until the next successful evaluation.
 //
-// # Component Evaluation
+// # Node Evaluation
 //
-// The process of converting the River block associated with a component into
-// the appropriate Go struct is called "component evaluation."
+// The process of converting the River block associated with a node into
+// the appropriate Go struct is called "node evaluation."
 //
-// Components are only evaluated after all components they reference have been
+// Nodes are only evaluated after all nodes they reference have been
 // evaluated; cyclic dependencies are invalid.
 //
-// If a component updates its Exports at runtime, other components which directly
-// or indirectly reference the updated component will have their Arguments
+// If a node updates its Exports at runtime, other nodes which directly
+// or indirectly reference the updated node will have their Arguments
 // re-evaluated.
 //
-// The arguments and exports for a component will be left in their last valid
-// state if a component shuts down or is given an invalid config. This prevents
-// a domino effect of a single failed component taking down other components
+// The arguments and exports for a node will be left in their last valid
+// state if a node shuts down or is given an invalid config. This prevents
+// a domino effect of a single failed node taking down other nodes
+// which are otherwise healthy.
 package flow

@@ -49,6 +49,7 @@ import (
 	"context"
 	"fmt"
 	"sync"
+	"time"

 	"github.com/grafana/agent/pkg/flow/internal/controller"
 	"github.com/grafana/agent/pkg/flow/internal/worker"
@@ -185,9 +186,9 @@ func newController(o controllerOptions) *Flow {
 		Logger:        log,
 		TraceProvider: tracer,
 		DataPath:      o.DataPath,
-		OnComponentUpdate: func(cn *controller.ComponentNode) {
-			// Changed components should be queued for reevaluation.
-			f.updateQueue.Enqueue(cn)
+		OnBlockNodeUpdate: func(cn controller.BlockNode) {
+			// Changed nodes should be queued for reevaluation.
+			f.updateQueue.Enqueue(&controller.QueuedNode{Node: cn, LastUpdatedTime: time.Now()})
 		},
 		OnExportsChange: o.OnExportsChange,
 		Registerer:      o.Reg,
@@ -236,8 +237,8 @@ func (f *Flow) Run(ctx context.Context) {
 			return

 		case <-f.updateQueue.Chan():
-			// Evaluate all components that have been updated.
Sending the entire batch together will improve - // throughput - it prevents the situation where two components have the same dependency, and the first time + // Evaluate all nodes that have been updated. Sending the entire batch together will improve + // throughput - it prevents the situation where two nodes have the same dependency, and the first time // it's picked up by the worker pool and the second time it's enqueued again, resulting in more evaluations. all := f.updateQueue.DequeueAll() f.loader.EvaluateDependants(ctx, all) diff --git a/pkg/flow/flow_components.go b/pkg/flow/flow_components.go index 0899971339b7..a60820c62988 100644 --- a/pkg/flow/flow_components.go +++ b/pkg/flow/flow_components.go @@ -29,7 +29,7 @@ func (f *Flow) GetComponent(id component.ID, opts component.InfoOptions) (*compo return nil, component.ErrComponentNotFound } - cn, ok := node.(*controller.ComponentNode) + cn, ok := node.(controller.ComponentNode) if !ok { return nil, fmt.Errorf("%q is not a component", id) } @@ -63,11 +63,11 @@ func (f *Flow) ListComponents(moduleID string, opts component.InfoOptions) ([]*c return detail, nil } -func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph, opts component.InfoOptions) *component.Info { +func (f *Flow) getComponentDetail(cn controller.ComponentNode, graph *dag.Graph, opts component.InfoOptions) *component.Info { var references, referencedBy []string // Skip over any edge which isn't between two component nodes. This is a - // temporary workaround needed until there's athe concept of configuration + // temporary workaround needed until there's a concept of configuration // blocks in the API. // // Without this change, the graph fails to render when a configuration @@ -75,12 +75,12 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph // // TODO(rfratto): add support for config block nodes in the API and UI. 
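The rewritten `Run` comment above spells out why the queue is drained in one batch: if two updated nodes share a dependant, dequeuing them together lets that dependant be evaluated once instead of once per update. A simplified, self-contained sketch of a queue with that coalescing behavior; the agent's actual `updateQueue` lives in the controller package and may differ in detail:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// QueuedNode mirrors the struct enqueued by OnBlockNodeUpdate: the node to
// re-evaluate plus the time the update happened, later used to measure how
// long a node waited before evaluation.
type QueuedNode struct {
	NodeID          string
	LastUpdatedTime time.Time
}

// dedupQueue is a simplified sketch of the controller's update queue:
// enqueueing the same node twice before a drain leaves a single entry, which
// is why draining with DequeueAll reduces the total number of evaluations.
type dedupQueue struct {
	mut     sync.Mutex
	pending map[string]QueuedNode
	order   []string
}

func newDedupQueue() *dedupQueue {
	return &dedupQueue{pending: make(map[string]QueuedNode)}
}

func (q *dedupQueue) Enqueue(n QueuedNode) {
	q.mut.Lock()
	defer q.mut.Unlock()
	if _, ok := q.pending[n.NodeID]; !ok {
		q.order = append(q.order, n.NodeID)
	}
	q.pending[n.NodeID] = n // keep the most recent update time
}

func (q *dedupQueue) DequeueAll() []QueuedNode {
	q.mut.Lock()
	defer q.mut.Unlock()
	batch := make([]QueuedNode, 0, len(q.order))
	for _, id := range q.order {
		batch = append(batch, q.pending[id])
	}
	q.pending = make(map[string]QueuedNode)
	q.order = nil
	return batch
}

func main() {
	q := newDedupQueue()
	q.Enqueue(QueuedNode{NodeID: "local.file.a", LastUpdatedTime: time.Now()})
	q.Enqueue(QueuedNode{NodeID: "local.file.a", LastUpdatedTime: time.Now()}) // collapses
	q.Enqueue(QueuedNode{NodeID: "prometheus.remote_write.b", LastUpdatedTime: time.Now()})
	fmt.Println(len(q.DequeueAll())) // 2, not 3
}
```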
for _, dep := range graph.Dependencies(cn) { - if _, ok := dep.(*controller.ComponentNode); ok { + if _, ok := dep.(controller.ComponentNode); ok { references = append(references, dep.NodeID()) } } for _, dep := range graph.Dependants(cn) { - if _, ok := dep.(*controller.ComponentNode); ok { + if _, ok := dep.(controller.ComponentNode); ok { referencedBy = append(referencedBy, dep.NodeID()) } } @@ -90,7 +90,6 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph health component.Health arguments component.Arguments exports component.Exports - debugInfo interface{} ) if opts.GetHealth { @@ -102,14 +101,8 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph if opts.GetExports { exports = cn.Exports() } - if opts.GetDebugInfo { - debugInfo = cn.DebugInfo() - } - - return &component.Info{ - Component: cn.Component(), - ModuleIDs: cn.ModuleIDs(), + componentInfo := &component.Info{ ID: component.ID{ ModuleID: f.opts.ControllerID, LocalID: cn.NodeID(), @@ -119,11 +112,19 @@ func (f *Flow) getComponentDetail(cn *controller.ComponentNode, graph *dag.Graph References: references, ReferencedBy: referencedBy, - Registration: cn.Registration(), - Health: health, + ComponentName: cn.ComponentName(), + Health: health, Arguments: arguments, Exports: exports, - DebugInfo: debugInfo, } + + if builtinComponent, ok := cn.(*controller.BuiltinComponentNode); ok { + componentInfo.Component = builtinComponent.Component() + componentInfo.ModuleIDs = builtinComponent.ModuleIDs() + if opts.GetDebugInfo { + componentInfo.DebugInfo = builtinComponent.DebugInfo() + } + } + return componentInfo } diff --git a/pkg/flow/flow_test.go b/pkg/flow/flow_test.go index 590f97a424f1..42f5a6077e06 100644 --- a/pkg/flow/flow_test.go +++ b/pkg/flow/flow_test.go @@ -59,7 +59,7 @@ func getFields(t *testing.T, g *dag.Graph, nodeID string) (component.Arguments, n := g.GetByID(nodeID) require.NotNil(t, n, "couldn't find node %q in graph", nodeID) - uc := n.(*controller.ComponentNode) + uc := n.(*controller.BuiltinComponentNode) return uc.Arguments(), uc.Exports() } diff --git a/pkg/flow/internal/controller/component_node.go b/pkg/flow/internal/controller/component_node.go new file mode 100644 index 000000000000..330911534f8b --- /dev/null +++ b/pkg/flow/internal/controller/component_node.go @@ -0,0 +1,32 @@ +package controller + +import ( + "github.com/grafana/agent/component" + "github.com/grafana/river/ast" +) + +// ComponentNode is a generic representation of a Flow component. +type ComponentNode interface { + RunnableNode + + // CurrentHealth returns the current health of the component. + CurrentHealth() component.Health + + // Arguments returns the current arguments of the managed component. + Arguments() component.Arguments + + // Exports returns the current set of exports from the managed component. + Exports() component.Exports + + // Label returns the component label. + Label() string + + // ComponentName returns the name of the component. + ComponentName() string + + // ID returns the component ID of the managed component from its River block. + ID() ComponentID + + // UpdateBlock updates the River block used to construct arguments for the managed component. 
+ UpdateBlock(b *ast.BlockStmt) +} diff --git a/pkg/flow/internal/controller/loader.go b/pkg/flow/internal/controller/loader.go index 10a6f37965ab..5ff95ab864f9 100644 --- a/pkg/flow/internal/controller/loader.go +++ b/pkg/flow/internal/controller/loader.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "path" "sync" "time" @@ -40,7 +41,7 @@ type Loader struct { mut sync.RWMutex graph *dag.Graph originalGraph *dag.Graph - componentNodes []*ComponentNode + componentNodes []ComponentNode serviceNodes []*ServiceNode cache *valueCache blocks []*ast.BlockStmt // Most recently loaded blocks, used for writing @@ -135,7 +136,7 @@ func (l *Loader) Apply(args map[string]any, componentBlocks []*ast.BlockStmt, co } var ( - components = make([]*ComponentNode, 0, len(componentBlocks)) + components = make([]ComponentNode, 0, len(componentBlocks)) componentIDs = make([]ComponentID, 0, len(componentBlocks)) services = make([]*ServiceNode, 0, len(l.services)) ) @@ -168,7 +169,7 @@ func (l *Loader) Apply(args map[string]any, componentBlocks []*ast.BlockStmt, co var err error switch n := n.(type) { - case *ComponentNode: + case ComponentNode: components = append(components, n) componentIDs = append(componentIDs, n.ID()) @@ -344,6 +345,17 @@ func (l *Loader) populateServiceNodes(g *dag.Graph, serviceBlocks []*ast.BlockSt // Now, assign blocks to services. for _, block := range serviceBlocks { blockID := BlockComponentID(block).String() + + if l.isModule() { + diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + Message: fmt.Sprintf("service blocks not allowed inside a module: %q", blockID), + StartPos: ast.StartPos(block).Position(), + EndPos: ast.EndPos(block).Position(), + }) + continue + } + node := g.GetByID(blockID).(*ServiceNode) // Blocks assigned to services are reset to nil in the previous loop. @@ -423,7 +435,7 @@ func (l *Loader) populateComponentNodes(g *dag.Graph, componentBlocks []*ast.Blo blockMap = make(map[string]*ast.BlockStmt, len(componentBlocks)) ) for _, block := range componentBlocks { - var c *ComponentNode + var c ComponentNode id := BlockComponentID(block).String() if orig, redefined := blockMap[id]; redefined { @@ -440,7 +452,7 @@ func (l *Loader) populateComponentNodes(g *dag.Graph, componentBlocks []*ast.Blo // Check the graph from the previous call to Load to see we can copy an // existing instance of ComponentNode. if exist := l.graph.GetByID(id); exist != nil { - c = exist.(*ComponentNode) + c = exist.(ComponentNode) c.UpdateBlock(block) } else { componentName := block.GetBlockName() @@ -466,7 +478,7 @@ func (l *Loader) populateComponentNodes(g *dag.Graph, componentBlocks []*ast.Blo } // Create a new component - c = NewComponentNode(l.globals, registration, block) + c = NewBuiltinComponentNode(l.globals, registration, block) } g.Add(c) @@ -515,7 +527,7 @@ func (l *Loader) Variables() map[string]interface{} { } // Components returns the current set of loaded components. -func (l *Loader) Components() []*ComponentNode { +func (l *Loader) Components() []ComponentNode { l.mut.RLock() defer l.mut.RUnlock() return l.componentNodes @@ -543,13 +555,13 @@ func (l *Loader) OriginalGraph() *dag.Graph { return l.originalGraph.Clone() } -// EvaluateDependants sends components which depend directly on components in updatedNodes for evaluation to the -// workerPool. It should be called whenever components update their exports. 
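With `ComponentNode` now an interface, callers hold the generic surface and down-assert to `*BuiltinComponentNode` only when they need builtin-only capabilities (the per-component Prometheus registry, debug info, module IDs), which is exactly what the flow_components.go and metrics.go hunks do. A self-contained sketch of that pattern using stand-in types, not the real agent ones:

```go
package main

import "fmt"

// componentNode is a pared-down stand-in for the controller.ComponentNode
// interface: the generic surface shared by every kind of component node.
type componentNode interface {
	NodeID() string
	ComponentName() string
}

// builtinNode stands in for BuiltinComponentNode, which additionally owns
// builtin-only state such as a metrics registry, debug info, and module IDs.
type builtinNode struct{ id, name string }

func (b *builtinNode) NodeID() string        { return b.id }
func (b *builtinNode) ComponentName() string { return b.name }
func (b *builtinNode) DebugInfo() any        { return "builtin-only debug info" }

// describe works against the interface and down-asserts only for the
// builtin-only capability, mirroring the shape of the new type assertions.
func describe(n componentNode) {
	fmt.Printf("%s (%s)", n.NodeID(), n.ComponentName())
	if b, ok := n.(*builtinNode); ok {
		fmt.Printf(" debug=%v", b.DebugInfo())
	}
	fmt.Println()
}

func main() {
	describe(&builtinNode{id: "local.file.secret", name: "local.file"})
}
```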
-// It is beneficial to call EvaluateDependants with a batch of components, as it will enqueue the entire batch before +// EvaluateDependants sends nodes which depend directly on nodes in updatedNodes for evaluation to the +// workerPool. It should be called whenever nodes update their exports. +// It is beneficial to call EvaluateDependants with a batch of nodes, as it will enqueue the entire batch before // the worker pool starts to evaluate them, resulting in smaller number of total evaluations when // node updates are frequent. If the worker pool's queue is full, EvaluateDependants will retry with a backoff until // it succeeds or until the ctx is cancelled. -func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*ComponentNode) { +func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*QueuedNode) { if len(updatedNodes) == 0 { return } @@ -565,12 +577,14 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone l.mut.RLock() defer l.mut.RUnlock() - dependenciesToParentsMap := make(map[dag.Node]*ComponentNode) + dependenciesToParentsMap := make(map[dag.Node]*QueuedNode) for _, parent := range updatedNodes { // Make sure we're in-sync with the current exports of parent. - l.cache.CacheExports(parent.ID(), parent.Exports()) + if componentNode, ok := parent.Node.(ComponentNode); ok { + l.cache.CacheExports(componentNode.ID(), componentNode.Exports()) + } // We collect all nodes directly incoming to parent. - _ = dag.WalkIncomingNodes(l.graph, parent, func(n dag.Node) error { + _ = dag.WalkIncomingNodes(l.graph, parent.Node, func(n dag.Node) error { dependenciesToParentsMap[n] = parent return nil }) @@ -583,7 +597,7 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone for n, parent := range dependenciesToParentsMap { dependantCtx, span := tracer.Start(spanCtx, "SubmitForEvaluation", trace.WithSpanKind(trace.SpanKindInternal)) span.SetAttributes(attribute.String("node_id", n.NodeID())) - span.SetAttributes(attribute.String("originator_id", parent.NodeID())) + span.SetAttributes(attribute.String("originator_id", parent.Node.NodeID())) // Submit for asynchronous evaluation with retries and backoff. Don't use range variables in the closure. var ( @@ -592,7 +606,8 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone err error ) for retryBackoff.Ongoing() { - err = l.workerPool.SubmitWithKey(nodeRef.NodeID(), func() { + globalUniqueKey := path.Join(l.globals.ControllerID, nodeRef.NodeID()) + err = l.workerPool.SubmitWithKey(globalUniqueKey, func() { l.concurrentEvalFn(nodeRef, dependantCtx, tracer, parentRef) }) if err != nil { @@ -601,7 +616,7 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone "and cannot keep up with evaluating components - will retry", "err", err, "node_id", n.NodeID(), - "originator_id", parent.NodeID(), + "originator_id", parent.Node.NodeID(), "retries", retryBackoff.NumRetries(), ) retryBackoff.Wait() @@ -624,9 +639,9 @@ func (l *Loader) EvaluateDependants(ctx context.Context, updatedNodes []*Compone // concurrentEvalFn returns a function that evaluates a node and updates the cache. This function can be submitted to // a worker pool for asynchronous evaluation. 
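`EvaluateDependants` now submits work under `path.Join(l.globals.ControllerID, nodeRef.NodeID())` rather than the bare node ID: two module instances can each contain a node with the same local ID, and a key shared across modules would make their submissions collide. Assuming `SubmitWithKey` coalesces submissions that share a key (which is what the batching comment implies), the stand-in below shows why the controller ID has to be part of the key; the queue-full errors and retry backoff handled in the real worker package are omitted:

```go
package main

import (
	"fmt"
	"path"
)

// keyedPool is an illustrative stand-in for the worker pool: while a task for
// a given key is queued, another submission with the same key is not accepted.
type keyedPool struct{ queued map[string]bool }

func (p *keyedPool) SubmitWithKey(key string, task func()) error {
	if p.queued[key] {
		return fmt.Errorf("task for key %q already queued", key)
	}
	p.queued[key] = true
	_ = task // a real pool would run the task on a worker and then clear the key
	return nil
}

func main() {
	pool := &keyedPool{queued: make(map[string]bool)}

	// Two module instances can both declare a node with the same local ID, so
	// the loader prefixes the submission key with the controller (module) ID.
	keyA := path.Join("module.string.a", "local.file.creds")
	keyB := path.Join("module.string.b", "local.file.creds")

	fmt.Println(pool.SubmitWithKey(keyA, func() {})) // <nil>: accepted
	fmt.Println(pool.SubmitWithKey(keyA, func() {})) // error: key already queued
	fmt.Println(pool.SubmitWithKey(keyB, func() {})) // <nil>: distinct module, distinct key
}
```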
-func (l *Loader) concurrentEvalFn(n dag.Node, spanCtx context.Context, tracer trace.Tracer, parent *ComponentNode) { +func (l *Loader) concurrentEvalFn(n dag.Node, spanCtx context.Context, tracer trace.Tracer, parent *QueuedNode) { start := time.Now() - l.cm.dependenciesWaitTime.Observe(time.Since(parent.lastUpdateTime.Load()).Seconds()) + l.cm.dependenciesWaitTime.Observe(time.Since(parent.LastUpdatedTime).Seconds()) _, span := tracer.Start(spanCtx, "EvaluateNode", trace.WithSpanKind(trace.SpanKindInternal)) span.SetAttributes(attribute.String("node_id", n.NodeID())) defer span.End() @@ -687,7 +702,7 @@ func (l *Loader) evaluate(logger log.Logger, bn BlockNode) error { // mut must be held when calling postEvaluate. func (l *Loader) postEvaluate(logger log.Logger, bn BlockNode, err error) error { switch c := bn.(type) { - case *ComponentNode: + case ComponentNode: // Always update the cache both the arguments and exports, since both might // change when a component gets re-evaluated. We also want to cache the arguments and exports in case of an error l.cache.CacheArguments(c.ID(), c.Arguments()) diff --git a/pkg/flow/internal/controller/loader_test.go b/pkg/flow/internal/controller/loader_test.go index e93f757b1a2f..1322db4a69c8 100644 --- a/pkg/flow/internal/controller/loader_test.go +++ b/pkg/flow/internal/controller/loader_test.go @@ -73,7 +73,7 @@ func TestLoader(t *testing.T) { Logger: l, TraceProvider: noop.NewTracerProvider(), DataPath: t.TempDir(), - OnComponentUpdate: func(cn *controller.ComponentNode) { /* no-op */ }, + OnBlockNodeUpdate: func(cn controller.BlockNode) { /* no-op */ }, Registerer: prometheus.NewRegistry(), NewModuleController: func(id string) controller.ModuleController { return nil @@ -207,7 +207,7 @@ func TestScopeWithFailingComponent(t *testing.T) { Logger: l, TraceProvider: noop.NewTracerProvider(), DataPath: t.TempDir(), - OnComponentUpdate: func(cn *controller.ComponentNode) { /* no-op */ }, + OnBlockNodeUpdate: func(cn controller.BlockNode) { /* no-op */ }, Registerer: prometheus.NewRegistry(), NewModuleController: func(id string) controller.ModuleController { return fakeModuleController{} diff --git a/pkg/flow/internal/controller/metrics.go b/pkg/flow/internal/controller/metrics.go index 1c5a558ccc1b..40698529b16d 100644 --- a/pkg/flow/internal/controller/metrics.go +++ b/pkg/flow/internal/controller/metrics.go @@ -112,7 +112,9 @@ func (cc *controllerCollector) Collect(ch chan<- prometheus.Metric) { for _, component := range cc.l.Components() { health := component.CurrentHealth().Health.String() componentsByHealth[health]++ - component.registry.Collect(ch) + if builtinComponent, ok := component.(*BuiltinComponentNode); ok { + builtinComponent.registry.Collect(ch) + } } for health, count := range componentsByHealth { diff --git a/pkg/flow/internal/controller/node_component.go b/pkg/flow/internal/controller/node_builtin_component.go similarity index 82% rename from pkg/flow/internal/controller/node_component.go rename to pkg/flow/internal/controller/node_builtin_component.go index b99597809d4b..6de912418436 100644 --- a/pkg/flow/internal/controller/node_component.go +++ b/pkg/flow/internal/controller/node_builtin_component.go @@ -21,7 +21,6 @@ import ( "github.com/grafana/river/vm" "github.com/prometheus/client_golang/prometheus" "go.opentelemetry.io/otel/trace" - "go.uber.org/atomic" ) // ComponentID is a fully-qualified name of a component. 
Each element in @@ -60,13 +59,13 @@ func (id ComponentID) Equals(other ComponentID) bool { // DialFunc is a function to establish a network connection. type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) -// ComponentGlobals are used by ComponentNodes to build managed components. All -// ComponentNodes should use the same ComponentGlobals. +// ComponentGlobals are used by BuiltinComponentNodes to build managed components. All +// BuiltinComponentNodes should use the same ComponentGlobals. type ComponentGlobals struct { Logger *logging.Logger // Logger shared between all managed components. TraceProvider trace.TracerProvider // Tracer shared between all managed components. DataPath string // Shared directory where component data may be stored - OnComponentUpdate func(cn *ComponentNode) // Informs controller that we need to reevaluate + OnBlockNodeUpdate func(cn BlockNode) // Informs controller that we need to reevaluate OnExportsChange func(exports map[string]any) // Invoked when the managed component updated its exports Registerer prometheus.Registerer // Registerer for serving agent and component metrics ControllerID string // ID of controller. @@ -74,12 +73,12 @@ type ComponentGlobals struct { GetServiceData func(name string) (interface{}, error) // Get data for a service. } -// ComponentNode is a controller node which manages a user-defined component. +// BuiltinComponentNode is a controller node which manages a builtin component. // -// ComponentNode manages the underlying component and caches its current -// arguments and exports. ComponentNode manages the arguments for the component +// BuiltinComponentNode manages the underlying component and caches its current +// arguments and exports. BuiltinComponentNode manages the arguments for the component // from a River block. -type ComponentNode struct { +type BuiltinComponentNode struct { id ComponentID globalID string label string @@ -90,8 +89,7 @@ type ComponentNode struct { registry *prometheus.Registry exportsType reflect.Type moduleController ModuleController - OnComponentUpdate func(cn *ComponentNode) // Informs controller that we need to reevaluate - lastUpdateTime atomic.Time + OnBlockNodeUpdate func(cn BlockNode) // Informs controller that we need to reevaluate mut sync.RWMutex block *ast.BlockStmt // Current River block to derive args from @@ -111,11 +109,11 @@ type ComponentNode struct { exports component.Exports // Evaluated exports for the managed component } -var _ BlockNode = (*ComponentNode)(nil) +var _ ComponentNode = (*BuiltinComponentNode)(nil) -// NewComponentNode creates a new ComponentNode from an initial ast.BlockStmt. +// NewBuiltinComponentNode creates a new BuiltinComponentNode from an initial ast.BlockStmt. // The underlying managed component isn't created until Evaluate is called. 
-func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *ast.BlockStmt) *ComponentNode { +func NewBuiltinComponentNode(globals ComponentGlobals, reg component.Registration, b *ast.BlockStmt) *BuiltinComponentNode { var ( id = BlockComponentID(b) nodeID = id.String() @@ -137,7 +135,7 @@ func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *a globalID = path.Join(globals.ControllerID, nodeID) } - cn := &ComponentNode{ + cn := &BuiltinComponentNode{ id: id, globalID: globalID, label: b.Label, @@ -146,7 +144,7 @@ func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *a reg: reg, exportsType: getExportsType(reg), moduleController: globals.NewModuleController(globalID), - OnComponentUpdate: globals.OnComponentUpdate, + OnBlockNodeUpdate: globals.OnBlockNodeUpdate, block: b, eval: vm.New(b.Body), @@ -163,7 +161,7 @@ func NewComponentNode(globals ComponentGlobals, reg component.Registration, b *a return cn } -func getManagedOptions(globals ComponentGlobals, cn *ComponentNode) component.Options { +func getManagedOptions(globals ComponentGlobals, cn *BuiltinComponentNode) component.Options { cn.registry = prometheus.NewRegistry() return component.Options{ ID: cn.globalID, @@ -192,37 +190,37 @@ func getExportsType(reg component.Registration) reflect.Type { } // Registration returns the original registration of the component. -func (cn *ComponentNode) Registration() component.Registration { return cn.reg } +func (cn *BuiltinComponentNode) Registration() component.Registration { return cn.reg } // Component returns the instance of the managed component. Component may be -// nil if the ComponentNode has not been successfully evaluated yet. -func (cn *ComponentNode) Component() component.Component { +// nil if the BuiltinComponentNode has not been successfully evaluated yet. +func (cn *BuiltinComponentNode) Component() component.Component { cn.mut.RLock() defer cn.mut.RUnlock() return cn.managed } // ID returns the component ID of the managed component from its River block. -func (cn *ComponentNode) ID() ComponentID { return cn.id } +func (cn *BuiltinComponentNode) ID() ComponentID { return cn.id } // Label returns the label for the block or "" if none was specified. -func (cn *ComponentNode) Label() string { return cn.label } +func (cn *BuiltinComponentNode) Label() string { return cn.label } // ComponentName returns the component's type, i.e. `local.file.test` returns `local.file`. -func (cn *ComponentNode) ComponentName() string { return cn.componentName } +func (cn *BuiltinComponentNode) ComponentName() string { return cn.componentName } // NodeID implements dag.Node and returns the unique ID for this node. The // NodeID is the string representation of the component's ID from its River // block. -func (cn *ComponentNode) NodeID() string { return cn.nodeID } +func (cn *BuiltinComponentNode) NodeID() string { return cn.nodeID } // UpdateBlock updates the River block used to construct arguments for the // managed component. The new block isn't used until the next time Evaluate is // invoked. // // UpdateBlock will panic if the block does not match the component ID of the -// ComponentNode. -func (cn *ComponentNode) UpdateBlock(b *ast.BlockStmt) { +// BuiltinComponentNode. 
+func (cn *BuiltinComponentNode) UpdateBlock(b *ast.BlockStmt) { if !BlockComponentID(b).Equals(cn.id) { panic("UpdateBlock called with a River block with a different component ID") } @@ -239,7 +237,7 @@ func (cn *ComponentNode) UpdateBlock(b *ast.BlockStmt) { // // Evaluate will return an error if the River block cannot be evaluated or if // decoding to arguments fails. -func (cn *ComponentNode) Evaluate(scope *vm.Scope) error { +func (cn *BuiltinComponentNode) Evaluate(scope *vm.Scope) error { err := cn.evaluate(scope) switch err { @@ -252,7 +250,7 @@ func (cn *ComponentNode) Evaluate(scope *vm.Scope) error { return err } -func (cn *ComponentNode) evaluate(scope *vm.Scope) error { +func (cn *BuiltinComponentNode) evaluate(scope *vm.Scope) error { cn.mut.Lock() defer cn.mut.Unlock() @@ -299,7 +297,7 @@ func (cn *ComponentNode) evaluate(scope *vm.Scope) error { // // Run will immediately return ErrUnevaluated if Evaluate has never been called // successfully. Otherwise, Run will return nil. -func (cn *ComponentNode) Run(ctx context.Context) error { +func (cn *BuiltinComponentNode) Run(ctx context.Context) error { cn.mut.RLock() managed := cn.managed cn.mut.RUnlock() @@ -325,19 +323,19 @@ func (cn *ComponentNode) Run(ctx context.Context) error { return err } -// ErrUnevaluated is returned if ComponentNode.Run is called before a managed +// ErrUnevaluated is returned if BuiltinComponentNode.Run is called before a managed // component is built. var ErrUnevaluated = errors.New("managed component not built") // Arguments returns the current arguments of the managed component. -func (cn *ComponentNode) Arguments() component.Arguments { +func (cn *BuiltinComponentNode) Arguments() component.Arguments { cn.mut.RLock() defer cn.mut.RUnlock() return cn.args } // Block implements BlockNode and returns the current block of the managed component. -func (cn *ComponentNode) Block() *ast.BlockStmt { +func (cn *BuiltinComponentNode) Block() *ast.BlockStmt { cn.mut.RLock() defer cn.mut.RUnlock() return cn.block @@ -345,7 +343,7 @@ func (cn *ComponentNode) Block() *ast.BlockStmt { // Exports returns the current set of exports from the managed component. // Exports returns nil if the managed component does not have exports. -func (cn *ComponentNode) Exports() component.Exports { +func (cn *BuiltinComponentNode) Exports() component.Exports { cn.exportsMut.RLock() defer cn.exportsMut.RUnlock() return cn.exports @@ -353,7 +351,7 @@ func (cn *ComponentNode) Exports() component.Exports { // setExports is called whenever the managed component updates. e must be the // same type as the registered exports type of the managed component. -func (cn *ComponentNode) setExports(e component.Exports) { +func (cn *BuiltinComponentNode) setExports(e component.Exports) { if cn.exportsType == nil { panic(fmt.Sprintf("Component %s called OnStateChange but never registered an Exports type", cn.nodeID)) } @@ -379,19 +377,18 @@ func (cn *ComponentNode) setExports(e component.Exports) { if changed { // Inform the controller that we have new exports. - cn.lastUpdateTime.Store(time.Now()) - cn.OnComponentUpdate(cn) + cn.OnBlockNodeUpdate(cn) } } -// CurrentHealth returns the current health of the ComponentNode. +// CurrentHealth returns the current health of the BuiltinComponentNode. // -// The health of a ComponentNode is determined by combining: +// The health of a BuiltinComponentNode is determined by combining: // // 1. Health from the call to Run(). // 2. Health from the last call to Evaluate(). // 3.
Health reported from the component. -func (cn *ComponentNode) CurrentHealth() component.Health { +func (cn *BuiltinComponentNode) CurrentHealth() component.Health { cn.healthMut.RLock() defer cn.healthMut.RUnlock() @@ -409,7 +406,7 @@ func (cn *ComponentNode) CurrentHealth() component.Health { } // DebugInfo returns debugging information from the managed component (if any). -func (cn *ComponentNode) DebugInfo() interface{} { +func (cn *BuiltinComponentNode) DebugInfo() interface{} { cn.mut.RLock() defer cn.mut.RUnlock() @@ -421,7 +418,7 @@ func (cn *ComponentNode) DebugInfo() interface{} { // setEvalHealth sets the internal health from a call to Evaluate. See Health // for information on how overall health is calculated. -func (cn *ComponentNode) setEvalHealth(t component.HealthType, msg string) { +func (cn *BuiltinComponentNode) setEvalHealth(t component.HealthType, msg string) { cn.healthMut.Lock() defer cn.healthMut.Unlock() @@ -434,7 +431,7 @@ func (cn *ComponentNode) setEvalHealth(t component.HealthType, msg string) { // setRunHealth sets the internal health from a call to Run. See Health for // information on how overall health is calculated. -func (cn *ComponentNode) setRunHealth(t component.HealthType, msg string) { +func (cn *BuiltinComponentNode) setRunHealth(t component.HealthType, msg string) { cn.healthMut.Lock() defer cn.healthMut.Unlock() @@ -447,6 +444,6 @@ func (cn *ComponentNode) setRunHealth(t component.HealthType, msg string) { // ModuleIDs returns the current list of modules that this component is // managing. -func (cn *ComponentNode) ModuleIDs() []string { +func (cn *BuiltinComponentNode) ModuleIDs() []string { return cn.moduleController.ModuleIDs() } diff --git a/pkg/flow/internal/controller/node_component_test.go b/pkg/flow/internal/controller/node_builtin_component_test.go similarity index 93% rename from pkg/flow/internal/controller/node_component_test.go rename to pkg/flow/internal/controller/node_builtin_component_test.go index 6eb46f004601..6a1165b2cc6d 100644 --- a/pkg/flow/internal/controller/node_component_test.go +++ b/pkg/flow/internal/controller/node_builtin_component_test.go @@ -14,7 +14,7 @@ func TestGlobalID(t *testing.T) { NewModuleController: func(id string) ModuleController { return nil }, - }, &ComponentNode{ + }, &BuiltinComponentNode{ nodeID: "local.id", globalID: "module.file/local.id", }) @@ -28,7 +28,7 @@ func TestLocalID(t *testing.T) { NewModuleController: func(id string) ModuleController { return nil }, - }, &ComponentNode{ + }, &BuiltinComponentNode{ nodeID: "local.id", globalID: "local.id", }) diff --git a/pkg/flow/internal/controller/node_service.go b/pkg/flow/internal/controller/node_service.go index 42bb73cfcc81..8d3a3303ea4e 100644 --- a/pkg/flow/internal/controller/node_service.go +++ b/pkg/flow/internal/controller/node_service.go @@ -24,10 +24,7 @@ type ServiceNode struct { args component.Arguments // Evaluated arguments for the managed component } -var ( - _ BlockNode = (*ServiceNode)(nil) - _ RunnableNode = (*ServiceNode)(nil) -) +var _ RunnableNode = (*ServiceNode)(nil) // NewServiceNode creates a new instance of a ServiceNode from an instance of a // Service. The provided host is used when running the service. 
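The hunks above generalize the controller's update callback from `*ComponentNode` to the `BlockNode` interface, and the `queue.go` hunk that follows moves the update timestamp off the node and into a `QueuedNode` wrapper. The loader side of that wiring is not part of this diff, so the following is only a minimal, self-contained sketch of how a controller might connect `OnBlockNodeUpdate` to the new queue entry type; the channel and `fakeNode` are illustrative stand-ins, not the real implementation:

```go
package main

import (
	"fmt"
	"time"
)

// BlockNode is trimmed to one method for illustration; the real controller
// interface also exposes Block() and Evaluate().
type BlockNode interface {
	NodeID() string
}

// QueuedNode mirrors the type added in queue.go below: the enqueue timestamp
// now travels with the queue entry, replacing the lastUpdateTime atomic that
// this diff removes from the component node.
type QueuedNode struct {
	Node            BlockNode
	LastUpdatedTime time.Time
}

type fakeNode struct{ id string }

func (n fakeNode) NodeID() string { return n.id }

func main() {
	pending := make(chan *QueuedNode, 8)

	// OnBlockNodeUpdate has the shape declared in ComponentGlobals above:
	// func(cn BlockNode). Any block node, not just a builtin component,
	// can now ask the controller for reevaluation.
	onBlockNodeUpdate := func(cn BlockNode) {
		pending <- &QueuedNode{Node: cn, LastUpdatedTime: time.Now()}
	}

	onBlockNodeUpdate(fakeNode{id: "local.file.example"})

	qn := <-pending
	fmt.Printf("reevaluate %s (queued at %s)\n",
		qn.Node.NodeID(), qn.LastUpdatedTime.Format(time.RFC3339Nano))
}
```

The real Queue additionally deduplicates entries and signals readiness over a non-blocking channel, as the `queue.go` hunk below shows; the buffered channel here only stands in for that machinery.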
diff --git a/pkg/flow/internal/controller/queue.go b/pkg/flow/internal/controller/queue.go index a8cd1b5bae05..65c1448573b9 100644 --- a/pkg/flow/internal/controller/queue.go +++ b/pkg/flow/internal/controller/queue.go @@ -2,32 +2,37 @@ package controller import ( "sync" + "time" ) -// Queue is a thread-safe, insertion-ordered set of components. +// Queue is a thread-safe, insertion-ordered set of nodes. // -// Queue is intended for tracking components that have updated their Exports -// for later reevaluation. +// Queue is intended for tracking nodes that have been updated for later reevaluation. type Queue struct { mut sync.Mutex - queuedSet map[*ComponentNode]struct{} - queuedOrder []*ComponentNode + queuedSet map[*QueuedNode]struct{} + queuedOrder []*QueuedNode updateCh chan struct{} } +type QueuedNode struct { + Node BlockNode + LastUpdatedTime time.Time +} + // NewQueue returns a new queue. func NewQueue() *Queue { return &Queue{ updateCh: make(chan struct{}, 1), - queuedSet: make(map[*ComponentNode]struct{}), - queuedOrder: make([]*ComponentNode, 0), + queuedSet: make(map[*QueuedNode]struct{}), + queuedOrder: make([]*QueuedNode, 0), } } -// Enqueue inserts a new component into the Queue. Enqueue is a no-op if the -// component is already in the Queue. -func (q *Queue) Enqueue(c *ComponentNode) { +// Enqueue inserts a new QueuedNode into the Queue. Enqueue is a no-op if the +// QueuedNode is already in the Queue. +func (q *Queue) Enqueue(c *QueuedNode) { q.mut.Lock() defer q.mut.Unlock() @@ -47,14 +52,14 @@ func (q *Queue) Enqueue(c *QueuedNode) { // Chan returns a channel which is written to when the queue is non-empty. func (q *Queue) Chan() <-chan struct{} { return q.updateCh } -// DequeueAll removes all components from the queue and returns them. -func (q *Queue) DequeueAll() []*ComponentNode { +// DequeueAll removes all QueuedNodes from the queue and returns them.
+func (q *Queue) DequeueAll() []*QueuedNode { q.mut.Lock() defer q.mut.Unlock() all := q.queuedOrder - q.queuedOrder = make([]*ComponentNode, 0) - q.queuedSet = make(map[*ComponentNode]struct{}) + q.queuedOrder = make([]*QueuedNode, 0) + q.queuedSet = make(map[*QueuedNode]struct{}) return all } diff --git a/pkg/flow/internal/controller/queue_test.go b/pkg/flow/internal/controller/queue_test.go index c93fb14ef8fc..c0a7cd930675 100644 --- a/pkg/flow/internal/controller/queue_test.go +++ b/pkg/flow/internal/controller/queue_test.go @@ -9,7 +9,7 @@ import ( ) func TestEnqueueDequeue(t *testing.T) { - tn := &ComponentNode{} + tn := &QueuedNode{} q := NewQueue() q.Enqueue(tn) require.Lenf(t, q.queuedSet, 1, "queue should be 1") @@ -26,7 +26,7 @@ func TestDequeue_Empty(t *testing.T) { } func TestDequeue_InOrder(t *testing.T) { - c1, c2, c3 := &ComponentNode{}, &ComponentNode{}, &ComponentNode{} + c1, c2, c3 := &QueuedNode{}, &QueuedNode{}, &QueuedNode{} q := NewQueue() q.Enqueue(c1) q.Enqueue(c2) @@ -41,7 +41,7 @@ func TestDequeue_InOrder(t *testing.T) { } func TestDequeue_NoDuplicates(t *testing.T) { - c1, c2 := &ComponentNode{}, &ComponentNode{} + c1, c2 := &QueuedNode{}, &QueuedNode{} q := NewQueue() q.Enqueue(c1) q.Enqueue(c1) @@ -58,7 +58,7 @@ func TestDequeue_NoDuplicates(t *testing.T) { } func TestEnqueue_ChannelNotification(t *testing.T) { - c1 := &ComponentNode{} + c1 := &QueuedNode{} q := NewQueue() notificationsCount := atomic.Int32{} diff --git a/pkg/flow/internal/controller/scheduler.go b/pkg/flow/internal/controller/scheduler.go index fe0576d49da6..10993aa80194 100644 --- a/pkg/flow/internal/controller/scheduler.go +++ b/pkg/flow/internal/controller/scheduler.go @@ -6,9 +6,9 @@ import ( "sync" ) -// RunnableNode is any dag.Node which can also be run. +// RunnableNode is any BlockNode which can also be run. type RunnableNode interface { - NodeID() string + BlockNode Run(ctx context.Context) error } diff --git a/pkg/flow/internal/controller/scheduler_test.go b/pkg/flow/internal/controller/scheduler_test.go index 644423f43699..c965d99c9db7 100644 --- a/pkg/flow/internal/controller/scheduler_test.go +++ b/pkg/flow/internal/controller/scheduler_test.go @@ -7,6 +7,8 @@ import ( "github.com/grafana/agent/component" "github.com/grafana/agent/pkg/flow/internal/controller" + "github.com/grafana/river/ast" + "github.com/grafana/river/vm" "github.com/stretchr/testify/require" ) @@ -93,8 +95,10 @@ type fakeRunnable struct { var _ controller.RunnableNode = fakeRunnable{} -func (fr fakeRunnable) NodeID() string { return fr.ID } -func (fr fakeRunnable) Run(ctx context.Context) error { return fr.Component.Run(ctx) } +func (fr fakeRunnable) NodeID() string { return fr.ID } +func (fr fakeRunnable) Run(ctx context.Context) error { return fr.Component.Run(ctx) } +func (fr fakeRunnable) Block() *ast.BlockStmt { return nil } +func (fr fakeRunnable) Evaluate(scope *vm.Scope) error { return nil } type mockComponent struct { RunFunc func(ctx context.Context) error diff --git a/pkg/flow/module_caching_test.go b/pkg/flow/module_eval_test.go similarity index 72% rename from pkg/flow/module_caching_test.go rename to pkg/flow/module_eval_test.go index e22e0583cbda..8b6d02c7c7d4 100644 --- a/pkg/flow/module_caching_test.go +++ b/pkg/flow/module_eval_test.go @@ -1,7 +1,7 @@ package flow_test -// This file contains tests which verify that the Flow controller correctly updates and caches modules' arguments -// and exports in presence of multiple components. 
+// This file contains tests which verify that the Flow controller correctly evaluates and updates modules, including +// the module's arguments and exports. import ( "context" @@ -141,6 +141,74 @@ func TestUpdates_ThroughModule(t *testing.T) { }, 3*time.Second, 10*time.Millisecond) } +func TestUpdates_TwoModules_SameCompNames(t *testing.T) { + // We use this module in a Flow config below. + module := ` + testcomponents.count "inc" { + frequency = "1ms" + max = 100 + } + + testcomponents.passthrough "pt" { + input = testcomponents.count.inc.count + lag = "1ms" + } + + export "output" { + value = testcomponents.passthrough.pt.output + } +` + + // We run two modules with the above body; they will have the same component names, but different module IDs. + config := ` + module.string "test_1" { + content = ` + strconv.Quote(module) + ` + } + + testcomponents.summation "sum_1" { + input = module.string.test_1.exports.output + } + + module.string "test_2" { + content = ` + strconv.Quote(module) + ` + } + + testcomponents.summation "sum_2" { + input = module.string.test_2.exports.output + } +` + + ctrl := flow.New(testOptions(t)) + f, err := flow.ParseSource(t.Name(), []byte(config)) + require.NoError(t, err) + require.NotNil(t, f) + + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan struct{}) + go func() { + ctrl.Run(ctx) + close(done) + }() + defer func() { + cancel() + <-done + }() + + // Verify updates propagated correctly. + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum_1") + return export.LastAdded == 100 + }, 3*time.Second, 10*time.Millisecond) + + require.Eventually(t, func() bool { + export := getExport[testcomponents.SummationExports](t, ctrl, "", "testcomponents.summation.sum_2") + return export.LastAdded == 100 + }, 3*time.Second, 10*time.Millisecond) +} + func testOptions(t *testing.T) flow.Options { t.Helper() s, err := logging.New(os.Stderr, logging.DefaultOptions) diff --git a/pkg/flow/module_fail_test.go b/pkg/flow/module_fail_test.go index 28fb0923a892..071c36813be3 100644 --- a/pkg/flow/module_fail_test.go +++ b/pkg/flow/module_fail_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/grafana/agent/pkg/flow/componenttest" + "github.com/grafana/agent/pkg/flow/internal/controller" "github.com/stretchr/testify/require" ) @@ -23,7 +24,7 @@ func TestIDRemovalIfFailedToLoad(t *testing.T) { go f.Run(ctx) var t1 *componenttest.TestFailModule require.Eventually(t, func() bool { - t1 = f.loader.Components()[0].Component().(*componenttest.TestFailModule) + t1 = f.loader.Components()[0].(*controller.BuiltinComponentNode).Component().(*componenttest.TestFailModule) return t1 != nil }, 10*time.Second, 100*time.Millisecond) require.Eventually(t, func() bool { diff --git a/pkg/flow/module_test.go b/pkg/flow/module_test.go index 4e4ddb9faaa8..c5f4417c84c3 100644 --- a/pkg/flow/module_test.go +++ b/pkg/flow/module_test.go @@ -7,8 +7,10 @@ import ( "time" "github.com/grafana/agent/component" + "github.com/grafana/agent/pkg/flow/internal/controller" "github.com/grafana/agent/pkg/flow/internal/worker" "github.com/grafana/agent/pkg/flow/logging" + "github.com/grafana/agent/service" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" ) @@ -43,6 +45,9 @@ const exportDummy = ` value = "bob" }` +const serviceConfig = ` + testservice {}` + func TestModule(t *testing.T) { tt := []struct { name string @@ 
-72,6 +77,12 @@ func TestModule(t *testing.T) { exportModuleContent: exportStringConfig, expectedErrorContains: "tracing block not allowed inside a module", }, + { + name: "Service blocks not allowed in module config", + argumentModuleContent: argumentConfig + serviceConfig, + exportModuleContent: exportStringConfig, + expectedErrorContains: "service blocks not allowed inside a module: \"testservice\"", + }, { name: "Argument not defined in module source", argumentModuleContent: `argument "different_argument" {}`, @@ -245,12 +256,19 @@ func testModuleControllerOptions(t *testing.T) *moduleControllerOptions { s, err := logging.New(os.Stderr, logging.DefaultOptions) require.NoError(t, err) + services := []service.Service{ + &testService{}, + } + + serviceMap := controller.NewServiceMap(services) + return &moduleControllerOptions{ Logger: s, DataPath: t.TempDir(), Reg: prometheus.NewRegistry(), ModuleRegistry: newModuleRegistry(), WorkerPool: worker.NewFixedWorkerPool(1, 100), + ServiceMap: serviceMap, } } @@ -307,3 +325,23 @@ func (t *testModule) Run(ctx context.Context) error { func (t *testModule) Update(_ component.Arguments) error { return nil } + +type testService struct{} + +func (t *testService) Definition() service.Definition { + return service.Definition{ + Name: "testservice", + } +} + +func (t *testService) Run(ctx context.Context, host service.Host) error { + return nil +} + +func (t *testService) Update(newConfig any) error { + return nil +} + +func (t *testService) Data() any { + return nil +} diff --git a/pkg/integrations/v2/register.go b/pkg/integrations/v2/register.go index 0deb356c6cb9..52b26b7794c4 100644 --- a/pkg/integrations/v2/register.go +++ b/pkg/integrations/v2/register.go @@ -228,7 +228,7 @@ func MarshalYAML(v interface{}) (interface{}, error) { panic(fmt.Sprintf("config not registered: %T", data)) } - if _, exists := uniqueSingletons[fieldName]; exists { + if _, exists := uniqueSingletons[fieldName]; exists && integrationType == TypeSingleton { return nil, fmt.Errorf("integration %q may not be defined more than once", fieldName) } uniqueSingletons[fieldName] = struct{}{} diff --git a/pkg/integrations/v2/register_test.go b/pkg/integrations/v2/register_test.go index 2e883131144e..df99cf50b481 100644 --- a/pkg/integrations/v2/register_test.go +++ b/pkg/integrations/v2/register_test.go @@ -186,6 +186,39 @@ func TestIntegrationRegistration_Marshal_MultipleSingleton(t *testing.T) { require.EqualError(t, err, `integration "test" may not be defined more than once`) } +func TestIntegrationRegistration_Marshal_Multiplex(t *testing.T) { + setRegistered(t, map[Config]Type{ + &testIntegrationA{}: TypeMultiplex, + &testIntegrationB{}: TypeMultiplex, + }) + + // Generate a config which has two instances of a Multiplex + // integration; unlike singletons, this should marshal without error. + input := testFullConfig{ + Name: "John Doe", + Duration: 500 * time.Millisecond, + Default: 12345, + Configs: []Config{ + &testIntegrationA{Text: "Hello, world!", Truth: true}, + &testIntegrationA{Text: "Hello again!", Truth: true}, + }, + } + + expectedCfg := `name: John Doe +duration: 500ms +default: 12345 +test_configs: +- text: Hello, world! + truth: true +- text: Hello again! 
+ truth: true +` + + cfg, err := yaml.Marshal(&input) + require.NoError(t, err) + require.Equal(t, expectedCfg, string(cfg)) +} + type legacyConfig struct { Text string `yaml:"text"` } diff --git a/pkg/mimir/client/client.go b/pkg/mimir/client/client.go index 9145e7b56e41..b6ed18068857 100644 --- a/pkg/mimir/client/client.go +++ b/pkg/mimir/client/client.go @@ -20,22 +20,18 @@ import ( "github.com/prometheus/prometheus/model/rulefmt" ) -const ( - rulerAPIPath = "/prometheus/config/v1/rules" - legacyAPIPath = "/api/v1/rules" -) - var ( - ErrNoConfig = errors.New("No config exists for this user") + ErrNoConfig = errors.New("no config exists for this user") ErrResourceNotFound = errors.New("requested resource not found") ) // Config is used to configure a MimirClient. type Config struct { - ID string - Address string - UseLegacyRoutes bool - HTTPClientConfig config.HTTPClientConfig + ID string + Address string + UseLegacyRoutes bool + HTTPClientConfig config.HTTPClientConfig + PrometheusHTTPPrefix string } type Interface interface { @@ -65,9 +61,12 @@ func New(logger log.Logger, cfg Config, timingHistogram *prometheus.HistogramVec return nil, err } - path := rulerAPIPath + path, err := url.JoinPath(cfg.PrometheusHTTPPrefix, "/config/v1/rules") + if err != nil { + return nil, err + } if cfg.UseLegacyRoutes { - path = legacyAPIPath + path = "/api/v1/rules" } collector := instrument.NewHistogramCollector(timingHistogram) diff --git a/pkg/mimir/client/client_test.go b/pkg/mimir/client/client_test.go index 5ef8a373519b..262e9918a9bf 100644 --- a/pkg/mimir/client/client_test.go +++ b/pkg/mimir/client/client_test.go @@ -79,6 +79,13 @@ func TestBuildURL(t *testing.T) { url: "http://mimir.local/apathto", resultURL: "http://mimir.local/apathto/prometheus/config/v1/rules/last-char-slash%2F", }, + { + name: "builds the correct URL with a customized prometheus_http_prefix", + path: "/mimir/config/v1/rules", + method: http.MethodPost, + url: "http://mimir.local/", + resultURL: "http://mimir.local/mimir/config/v1/rules", + }, } for _, tt := range tc { diff --git a/pkg/mimir/client/rules_test.go b/pkg/mimir/client/rules_test.go index e2ab18a17839..a4ccdde6e509 100644 --- a/pkg/mimir/client/rules_test.go +++ b/pkg/mimir/client/rules_test.go @@ -22,49 +22,63 @@ func TestMimirClient_X(t *testing.T) { })) defer ts.Close() - client, err := New(log.NewNopLogger(), Config{ - Address: ts.URL, - }, prometheus.NewHistogramVec(prometheus.HistogramOpts{}, instrument.HistogramCollectorBuckets)) - require.NoError(t, err) - for _, tc := range []struct { - test string - namespace string - name string - expURLPath string + test string + namespace string + name string + prometheusHTTPPrefix string + expURLPath string }{ { - test: "regular-characters", - namespace: "my-namespace", - name: "my-name", - expURLPath: "/prometheus/config/v1/rules/my-namespace/my-name", + test: "regular-characters", + namespace: "my-namespace", + name: "my-name", + expURLPath: "/prometheus/config/v1/rules/my-namespace/my-name", + prometheusHTTPPrefix: "/prometheus", + }, + { + test: "special-characters-spaces", + namespace: "My: Namespace", + name: "My: Name", + prometheusHTTPPrefix: "/prometheus", + expURLPath: "/prometheus/config/v1/rules/My:%20Namespace/My:%20Name", }, { - test: "special-characters-spaces", - namespace: "My: Namespace", - name: "My: Name", - expURLPath: "/prometheus/config/v1/rules/My:%20Namespace/My:%20Name", + test: "special-characters-slashes", + namespace: "My/Namespace", + name: "My/Name", + prometheusHTTPPrefix: 
"/prometheus", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/My%2FName", }, { - test: "special-characters-slashes", - namespace: "My/Namespace", - name: "My/Name", - expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/My%2FName", + test: "special-characters-slash-first", + namespace: "My/Namespace", + name: "/first-char-slash", + prometheusHTTPPrefix: "/prometheus", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/%2Ffirst-char-slash", }, { - test: "special-characters-slash-first", - namespace: "My/Namespace", - name: "/first-char-slash", - expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/%2Ffirst-char-slash", + test: "special-characters-slash-last", + namespace: "My/Namespace", + name: "last-char-slash/", + prometheusHTTPPrefix: "/prometheus", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/last-char-slash%2F", }, { - test: "special-characters-slash-last", - namespace: "My/Namespace", - name: "last-char-slash/", - expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/last-char-slash%2F", + test: "regular-characters-with-customized-prometheus-http-prefix", + namespace: "My/Namespace", + name: "last-char-slash/", + prometheusHTTPPrefix: "/mimir", + expURLPath: "/mimir/config/v1/rules/My%2FNamespace/last-char-slash%2F", }, } { t.Run(tc.test, func(t *testing.T) { + client, err := New(log.NewNopLogger(), Config{ + Address: ts.URL, + PrometheusHTTPPrefix: tc.prometheusHTTPPrefix, + }, prometheus.NewHistogramVec(prometheus.HistogramOpts{}, instrument.HistogramCollectorBuckets)) + require.NoError(t, err) + ctx := context.Background() require.NoError(t, client.DeleteRuleGroup(ctx, tc.namespace, tc.name)) diff --git a/pkg/operator/defaults.go b/pkg/operator/defaults.go index bc9cff6ab04e..25e4e05e986d 100644 --- a/pkg/operator/defaults.go +++ b/pkg/operator/defaults.go @@ -2,7 +2,7 @@ package operator // Supported versions of the Grafana Agent. var ( - DefaultAgentVersion = "v0.39.0" + DefaultAgentVersion = "v0.39.2" DefaultAgentBaseImage = "grafana/agent" DefaultAgentImage = DefaultAgentBaseImage + ":" + DefaultAgentVersion ) diff --git a/tools/gen-versioned-files/agent-version.txt b/tools/gen-versioned-files/agent-version.txt index 999889149ec2..76b2d23e8697 100644 --- a/tools/gen-versioned-files/agent-version.txt +++ b/tools/gen-versioned-files/agent-version.txt @@ -1 +1 @@ -v0.39.0 \ No newline at end of file +v0.39.2 \ No newline at end of file