diff --git a/CHANGELOG.md b/CHANGELOG.md index 285f0d20f501..2ca244f6c638 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,10 @@ internal API changes are not present. Main (unreleased) ----------------- +### Security fixes + +- Fix CVE-2023-47108 by updating `otelgrpc` from v0.45.0 to v0.46.0. (@hainenber) + ### Features - Agent Management: Introduce support for templated configuration. (@jcreixell) @@ -31,6 +35,11 @@ Main (unreleased) Previously, only `remote.*` and `local.*` components could be referenced without a circular dependency. (@rfratto) +- Add a `resource_to_telemetry_conversion` argument to `otelcol.exporter.prometheus` + for converting resource attributes to Prometheus labels. (@hainenber) + +- `pyroscope.ebpf` now supports Python on arm64 platforms. (@korniltsev) + ### Bugfixes - Permit `X-Faro-Session-ID` header in CORS requests for the `faro.receiver` @@ -38,9 +47,13 @@ Main (unreleased) (@cedricziel) - Fix issue with windows_exporter defaults not being set correctly. (@mattdurham) + +- Fix agent crash when processing `nil` OTel fan-out consumers. (@hainenber) + +- Fix issue in `prometheus.operator.*` where targets would be dropped if two CRDs share a common prefix in their names. (@Paul424, @captncraig) - Fix issue where `convert` command would generate incorrect Flow Mode config - when provided `promtail` configuration that uses `docker_sd_configs` (@thampiotr) + when provided `promtail` configuration that uses `docker_sd_configs` (@thampiotr) v0.38.0 (2023-11-21) -------------------- @@ -123,7 +136,7 @@ v0.38.0 (2023-11-21) - Make component list sortable in web UI. (@hainenber) - Adds new metrics (`mssql_server_total_memory_bytes`, `mssql_server_target_memory_bytes`, - and `mssql_available_commit_memory_bytes`) for `mssql` integration. + and `mssql_available_commit_memory_bytes`) for `mssql` integration (@StefanKurek). - Grafana Agent Operator: `config-reloader` container no longer runs as root. (@rootmout) @@ -140,6 +153,8 @@ v0.38.0 (2023-11-21) - Allow agent to start with `module.git` config if cached before. (@hainenber) +- Adds new optional config parameter `query_config` to `mssql` integration to allow for custom metrics. (@StefanKurek) + ### Bugfixes - Set exit code 1 on grafana-agentctl non-runnable command. (@fgouteroux) @@ -197,6 +212,8 @@ v0.38.0 (2023-11-21) - Fix converter output for prometheus.exporter.windows to not unnecessarily add empty blocks. (@erikbaranowski) +- Fix converter issue with `loki.relabel` and `max_cache_size` being set to 0 instead of default (10_000). (@mattdurham) + ### Other changes - Bump `mysqld_exporter` version to v0.15.0. (@marctc) diff --git a/component/otelcol/exporter/prometheus/internal/convert/convert.go b/component/otelcol/exporter/prometheus/internal/convert/convert.go index 3e2a2578c5a4..0a7039e7195e 100644 --- a/component/otelcol/exporter/prometheus/internal/convert/convert.go +++ b/component/otelcol/exporter/prometheus/internal/convert/convert.go @@ -65,6 +65,8 @@ type Options struct { IncludeScopeLabels bool // AddMetricSuffixes controls whether suffixes are added to metric names. Defaults to true.
AddMetricSuffixes bool + // ResourceToTelemetryConversion controls whether to convert resource attributes to Prometheus-compatible datapoint attributes + ResourceToTelemetryConversion bool } var _ consumer.Metrics = (*Converter)(nil) @@ -131,6 +133,7 @@ func (conv *Converter) consumeResourceMetrics(app storage.Appender, rm pmetric.R Type: textparse.MetricTypeGauge, Help: "Target metadata", }) + resAttrs := rm.Resource().Attributes() memResource := conv.getOrCreateResource(rm.Resource()) if conv.getOpts().IncludeTargetInfo { @@ -144,7 +147,7 @@ func (conv *Converter) consumeResourceMetrics(app storage.Appender, rm pmetric.R for smcount := 0; smcount < rm.ScopeMetrics().Len(); smcount++ { sm := rm.ScopeMetrics().At(smcount) - conv.consumeScopeMetrics(app, memResource, sm) + conv.consumeScopeMetrics(app, memResource, sm, resAttrs) } } @@ -219,7 +222,7 @@ func (conv *Converter) getOrCreateResource(res pcommon.Resource) *memorySeries { return entry } -func (conv *Converter) consumeScopeMetrics(app storage.Appender, memResource *memorySeries, sm pmetric.ScopeMetrics) { +func (conv *Converter) consumeScopeMetrics(app storage.Appender, memResource *memorySeries, sm pmetric.ScopeMetrics, resAttrs pcommon.Map) { scopeMD := conv.createOrUpdateMetadata("otel_scope_info", metadata.Metadata{ Type: textparse.MetricTypeGauge, }) @@ -236,7 +239,7 @@ func (conv *Converter) consumeScopeMetrics(app storage.Appender, memResource *me for mcount := 0; mcount < sm.Metrics().Len(); mcount++ { m := sm.Metrics().At(mcount) - conv.consumeMetric(app, memResource, memScope, m) + conv.consumeMetric(app, memResource, memScope, m, resAttrs) } } @@ -274,20 +277,27 @@ func (conv *Converter) getOrCreateScope(res *memorySeries, scope pcommon.Instrum return entry } -func (conv *Converter) consumeMetric(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeMetric(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { switch m.Type() { case pmetric.MetricTypeGauge: - conv.consumeGauge(app, memResource, memScope, m) + conv.consumeGauge(app, memResource, memScope, m, resAttrs) case pmetric.MetricTypeSum: - conv.consumeSum(app, memResource, memScope, m) + conv.consumeSum(app, memResource, memScope, m, resAttrs) case pmetric.MetricTypeHistogram: - conv.consumeHistogram(app, memResource, memScope, m) + conv.consumeHistogram(app, memResource, memScope, m, resAttrs) case pmetric.MetricTypeSummary: - conv.consumeSummary(app, memResource, memScope, m) + conv.consumeSummary(app, memResource, memScope, m, resAttrs) } } -func (conv *Converter) consumeGauge(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func joinAttributeMaps(from, to pcommon.Map) { + from.Range(func(k string, v pcommon.Value) bool { + v.CopyTo(to.PutEmpty(k)) + return true + }) +} + +func (conv *Converter) consumeGauge(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) metricMD := conv.createOrUpdateMetadata(metricName, metadata.Metadata{ @@ -302,6 +312,10 @@ func (conv *Converter) consumeGauge(app storage.Appender, memResource *memorySer for dpcount := 0; dpcount < m.Gauge().DataPoints().Len(); dpcount++ { dp := m.Gauge().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + 
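+			// Note: joinAttributeMaps copies every resource attribute into the
+			// data point's attribute map, so on a key collision the resource
+			// attribute overwrites the data point's own value before the
+			// series is created below.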
memSeries := conv.getOrCreateSeries(memResource, memScope, metricName, dp.Attributes()) if err := writeSeries(app, memSeries, dp, getNumberDataPointValue(dp)); err != nil { level.Error(conv.log).Log("msg", "failed to write metric sample", metricName, "err", err) @@ -389,7 +403,7 @@ func getNumberDataPointValue(dp pmetric.NumberDataPoint) float64 { return 0 } -func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) // Excerpt from the spec: @@ -430,6 +444,10 @@ func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySerie for dpcount := 0; dpcount < m.Sum().DataPoints().Len(); dpcount++ { dp := m.Sum().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + memSeries := conv.getOrCreateSeries(memResource, memScope, metricName, dp.Attributes()) val := getNumberDataPointValue(dp) @@ -447,7 +465,7 @@ func (conv *Converter) consumeSum(app storage.Appender, memResource *memorySerie } } -func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) if m.Histogram().AggregationTemporality() != pmetric.AggregationTemporalityCumulative { @@ -469,6 +487,10 @@ func (conv *Converter) consumeHistogram(app storage.Appender, memResource *memor for dpcount := 0; dpcount < m.Histogram().DataPoints().Len(); dpcount++ { dp := m.Histogram().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + // Sum metric if dp.HasSum() { sumMetric := conv.getOrCreateSeries(memResource, memScope, metricName+"_sum", dp.Attributes()) @@ -606,7 +628,7 @@ func (conv *Converter) convertExemplar(otelExemplar pmetric.Exemplar, ts time.Ti } } -func (conv *Converter) consumeSummary(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric) { +func (conv *Converter) consumeSummary(app storage.Appender, memResource *memorySeries, memScope *memorySeries, m pmetric.Metric, resAttrs pcommon.Map) { metricName := prometheus.BuildCompliantName(m, "", conv.opts.AddMetricSuffixes) metricMD := conv.createOrUpdateMetadata(metricName, metadata.Metadata{ @@ -621,6 +643,10 @@ func (conv *Converter) consumeSummary(app storage.Appender, memResource *memoryS for dpcount := 0; dpcount < m.Summary().DataPoints().Len(); dpcount++ { dp := m.Summary().DataPoints().At(dpcount) + if conv.getOpts().ResourceToTelemetryConversion { + joinAttributeMaps(resAttrs, dp.Attributes()) + } + // Sum metric { sumMetric := conv.getOrCreateSeries(memResource, memScope, metricName+"_sum", dp.Attributes()) diff --git a/component/otelcol/exporter/prometheus/internal/convert/convert_test.go b/component/otelcol/exporter/prometheus/internal/convert/convert_test.go index 80a6bce1a55b..dcace6574b6c 100644 --- a/component/otelcol/exporter/prometheus/internal/convert/convert_test.go +++ b/component/otelcol/exporter/prometheus/internal/convert/convert_test.go @@ -18,12 +18,13 @@ func 
TestConverter(t *testing.T) { input string expect string - showTimestamps bool - includeTargetInfo bool - includeScopeInfo bool - includeScopeLabels bool - addMetricSuffixes bool - enableOpenMetrics bool + showTimestamps bool + includeTargetInfo bool + includeScopeInfo bool + includeScopeLabels bool + addMetricSuffixes bool + enableOpenMetrics bool + resourceToTelemetryConversion bool }{ { name: "Gauge", @@ -838,6 +839,274 @@ func TestConverter(t *testing.T) { addMetricSuffixes: true, enableOpenMetrics: true, }, + { + name: "Gauge: convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [{ + "name": "test_metric_gauge", + "gauge": { + "data_points": [{ + "as_double": 1234.56 + }] + } + }] + }] + }] + }`, + expect: ` + # TYPE test_metric_gauge gauge + test_metric_gauge{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 1234.56 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, + { + name: "Gauge: NOT convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [{ + "name": "test_metric_gauge", + "gauge": { + "data_points": [{ + "as_double": 1234.56 + }] + } + }] + }] + }] + }`, + expect: ` + # TYPE test_metric_gauge gauge + test_metric_gauge{instance="instance",job="myservice"} 1234.56 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: false, + }, + { + name: "Summary: convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [{ + "name": "test_metric_summary", + "unit": "seconds", + "summary": { + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "count": 333, + "sum": 100, + "quantile_values": [ + { "quantile": 0, "value": 100 }, + { "quantile": 0.5, "value": 400 }, + { "quantile": 1, "value": 500 } + ] + }] + } + }] + }] + }] + }`, + expect: ` + # TYPE test_metric_summary summary + test_metric_summary{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",quantile="0.0"} 100.0 + test_metric_summary{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",quantile="0.5"} 400.0 + 
test_metric_summary{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",quantile="1.0"} 500.0 + test_metric_summary_sum{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 100.0 + test_metric_summary_count{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 333 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, + { + name: "Histogram: convert resource attributes to metric label", + input: `{ + "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [ + { + "name": "test_metric_histogram", + "unit": "seconds", + "histogram": { + "aggregation_temporality": 2, + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "count": 333, + "sum": 100, + "bucket_counts": [0, 111, 0, 222], + "explicit_bounds": [0.25, 0.5, 0.75, 1.0], + "exemplars":[ + { + "time_unix_nano": 1000000001, + "as_double": 0.3, + "span_id": "aaaaaaaaaaaaaaaa", + "trace_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + }, + { + "time_unix_nano": 1000000003, + "as_double": 1.5, + "span_id": "cccccccccccccccc", + "trace_id": "cccccccccccccccccccccccccccccccc" + }, + { + "time_unix_nano": 1000000002, + "as_double": 0.5, + "span_id": "bbbbbbbbbbbbbbbb", + "trace_id": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb" + } + ] + }] + } + } + ] + }] + }] + }`, + expect: ` + # TYPE test_metric_histogram histogram + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="0.25"} 0 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="0.5"} 111 # {span_id="aaaaaaaaaaaaaaaa",trace_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} 0.3 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="0.75"} 111 # {span_id="bbbbbbbbbbbbbbbb",trace_id="bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"} 0.5 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="1.0"} 333 + test_metric_histogram_bucket{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test",le="+Inf"} 333 # {span_id="cccccccccccccccc",trace_id="cccccccccccccccccccccccccccccccc"} 1.5 + test_metric_histogram_sum{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 100.0 + test_metric_histogram_count{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 333 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, + { + name: "Monotonic sum: convert resource attributes to metric label", + input: `{ 
+ "resource_metrics": [{ + "resource": { + "attributes": [{ + "key": "service.name", + "value": { "stringValue": "myservice" } + }, { + "key": "service.instance.id", + "value": { "stringValue": "instance" } + }, { + "key": "raw", + "value": { "stringValue": "test" } + },{ + "key": "foo.one", + "value": { "stringValue": "foo" } + }, { + "key": "bar.one", + "value": { "stringValue": "bar" } + }] + }, + "scope_metrics": [{ + "metrics": [ + { + "name": "test_metric_mono_sum_total", + "unit": "seconds", + "sum": { + "aggregation_temporality": 2, + "is_monotonic": true, + "data_points": [{ + "start_time_unix_nano": 1000000000, + "time_unix_nano": 1000000000, + "as_double": 15, + "exemplars":[ + { + "time_unix_nano": 1000000001, + "as_double": 0.3, + "span_id": "aaaaaaaaaaaaaaaa", + "trace_id": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + } + ] + }] + } + } + ] + }] + }] + }`, + expect: ` + # TYPE test_metric_mono_sum counter + test_metric_mono_sum_total{bar_one="bar",foo_one="foo",instance="instance",service_instance_id="instance",job="myservice",service_name="myservice",raw="test"} 15.0 # {span_id="aaaaaaaaaaaaaaaa",trace_id="aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"} 0.3 + `, + enableOpenMetrics: true, + resourceToTelemetryConversion: true, + }, } decoder := &pmetric.JSONUnmarshaler{} @@ -851,10 +1120,11 @@ func TestConverter(t *testing.T) { l := util.TestLogger(t) conv := convert.New(l, appenderAppendable{Inner: &app}, convert.Options{ - IncludeTargetInfo: tc.includeTargetInfo, - IncludeScopeInfo: tc.includeScopeInfo, - IncludeScopeLabels: tc.includeScopeLabels, - AddMetricSuffixes: tc.addMetricSuffixes, + IncludeTargetInfo: tc.includeTargetInfo, + IncludeScopeInfo: tc.includeScopeInfo, + IncludeScopeLabels: tc.includeScopeLabels, + AddMetricSuffixes: tc.addMetricSuffixes, + ResourceToTelemetryConversion: tc.resourceToTelemetryConversion, }) require.NoError(t, conv.ConsumeMetrics(context.Background(), payload)) diff --git a/component/otelcol/exporter/prometheus/prometheus.go b/component/otelcol/exporter/prometheus/prometheus.go index 77e894dc6321..7da1c03868ea 100644 --- a/component/otelcol/exporter/prometheus/prometheus.go +++ b/component/otelcol/exporter/prometheus/prometheus.go @@ -31,21 +31,23 @@ func init() { // Arguments configures the otelcol.exporter.prometheus component. type Arguments struct { - IncludeTargetInfo bool `river:"include_target_info,attr,optional"` - IncludeScopeInfo bool `river:"include_scope_info,attr,optional"` - IncludeScopeLabels bool `river:"include_scope_labels,attr,optional"` - GCFrequency time.Duration `river:"gc_frequency,attr,optional"` - ForwardTo []storage.Appendable `river:"forward_to,attr"` - AddMetricSuffixes bool `river:"add_metric_suffixes,attr,optional"` + IncludeTargetInfo bool `river:"include_target_info,attr,optional"` + IncludeScopeInfo bool `river:"include_scope_info,attr,optional"` + IncludeScopeLabels bool `river:"include_scope_labels,attr,optional"` + GCFrequency time.Duration `river:"gc_frequency,attr,optional"` + ForwardTo []storage.Appendable `river:"forward_to,attr"` + AddMetricSuffixes bool `river:"add_metric_suffixes,attr,optional"` + ResourceToTelemetryConversion bool `river:"resource_to_telemetry_conversion,attr,optional"` } // DefaultArguments holds defaults values. 
var DefaultArguments = Arguments{ - IncludeTargetInfo: true, - IncludeScopeInfo: false, - IncludeScopeLabels: true, - GCFrequency: 5 * time.Minute, - AddMetricSuffixes: true, + IncludeTargetInfo: true, + IncludeScopeInfo: false, + IncludeScopeLabels: true, + GCFrequency: 5 * time.Minute, + AddMetricSuffixes: true, + ResourceToTelemetryConversion: false, } // SetToDefault implements river.Defaulter. @@ -151,8 +153,9 @@ func (c *Component) Update(newConfig component.Arguments) error { func convertArgumentsToConvertOptions(args Arguments) convert.Options { return convert.Options{ - IncludeTargetInfo: args.IncludeTargetInfo, - IncludeScopeInfo: args.IncludeScopeInfo, - AddMetricSuffixes: args.AddMetricSuffixes, + IncludeTargetInfo: args.IncludeTargetInfo, + IncludeScopeInfo: args.IncludeScopeInfo, + AddMetricSuffixes: args.AddMetricSuffixes, + ResourceToTelemetryConversion: args.ResourceToTelemetryConversion, } } diff --git a/component/otelcol/exporter/prometheus/prometheus_test.go b/component/otelcol/exporter/prometheus/prometheus_test.go index 2939c8962346..7e642ff9b585 100644 --- a/component/otelcol/exporter/prometheus/prometheus_test.go +++ b/component/otelcol/exporter/prometheus/prometheus_test.go @@ -23,12 +23,13 @@ func TestArguments_UnmarshalRiver(t *testing.T) { forward_to = [] `, expected: prometheus.Arguments{ - IncludeTargetInfo: true, - IncludeScopeInfo: false, - IncludeScopeLabels: true, - GCFrequency: 5 * time.Minute, - AddMetricSuffixes: true, - ForwardTo: []storage.Appendable{}, + IncludeTargetInfo: true, + IncludeScopeInfo: false, + IncludeScopeLabels: true, + GCFrequency: 5 * time.Minute, + AddMetricSuffixes: true, + ForwardTo: []storage.Appendable{}, + ResourceToTelemetryConversion: false, }, }, { @@ -39,15 +40,17 @@ func TestArguments_UnmarshalRiver(t *testing.T) { include_scope_labels = false gc_frequency = "1s" add_metric_suffixes = false + resource_to_telemetry_conversion = true forward_to = [] `, expected: prometheus.Arguments{ - IncludeTargetInfo: false, - IncludeScopeInfo: true, - IncludeScopeLabels: false, - GCFrequency: 1 * time.Second, - AddMetricSuffixes: false, - ForwardTo: []storage.Appendable{}, + IncludeTargetInfo: false, + IncludeScopeInfo: true, + IncludeScopeLabels: false, + GCFrequency: 1 * time.Second, + AddMetricSuffixes: false, + ForwardTo: []storage.Appendable{}, + ResourceToTelemetryConversion: true, }, }, { diff --git a/component/otelcol/internal/fanoutconsumer/logs.go b/component/otelcol/internal/fanoutconsumer/logs.go index a01202686e01..a8ee4df45b7f 100644 --- a/component/otelcol/internal/fanoutconsumer/logs.go +++ b/component/otelcol/internal/fanoutconsumer/logs.go @@ -29,6 +29,10 @@ func Logs(in []otelcol.Consumer) otelconsumer.Logs { for i := 0; i < len(in)-1; i++ { consumer := in[i] + if consumer == nil { + continue + } + if consumer.Capabilities().MutatesData { clone = append(clone, consumer) } else { @@ -40,10 +44,12 @@ func Logs(in []otelcol.Consumer) otelconsumer.Logs { // The final consumer can be given to the passthrough list regardless of // whether it mutates as long as there's no other read-only consumers. 
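+	// A nil final consumer is skipped entirely: calling Capabilities on it
+	// would panic. This mirrors the nil check in the loop above.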
- if len(passthrough) == 0 || !last.Capabilities().MutatesData { - passthrough = append(passthrough, last) - } else { - clone = append(clone, last) + if last != nil { + if len(passthrough) == 0 || !last.Capabilities().MutatesData { + passthrough = append(passthrough, last) + } else { + clone = append(clone, last) + } } return &logsFanout{ diff --git a/component/otelcol/receiver/prometheus/prometheus.go b/component/otelcol/receiver/prometheus/prometheus.go index 5a54de7337b8..b0493f4d6982 100644 --- a/component/otelcol/receiver/prometheus/prometheus.go +++ b/component/otelcol/receiver/prometheus/prometheus.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/prometheus/storage" otelcomponent "go.opentelemetry.io/collector/component" otelreceiver "go.opentelemetry.io/collector/receiver" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/trace" + metricNoop "go.opentelemetry.io/otel/metric/noop" + traceNoop "go.opentelemetry.io/otel/trace/noop" ) func init() { @@ -107,8 +107,8 @@ func (c *Component) Update(newConfig component.Arguments) error { Logger: zapadapter.New(c.opts.Logger), // TODO(tpaschalis): expose tracing and logging statistics. - TracerProvider: trace.NewNoopTracerProvider(), - MeterProvider: noop.NewMeterProvider(), + TracerProvider: traceNoop.NewTracerProvider(), + MeterProvider: metricNoop.NewMeterProvider(), ReportComponentStatus: func(*otelcomponent.StatusEvent) error { return nil diff --git a/component/prometheus/exporter/mssql/mssql.go b/component/prometheus/exporter/mssql/mssql.go index 3e9770fb8892..bef73f16a44c 100644 --- a/component/prometheus/exporter/mssql/mssql.go +++ b/component/prometheus/exporter/mssql/mssql.go @@ -2,14 +2,18 @@ package mssql import ( "errors" + "fmt" "time" + "github.com/burningalchemist/sql_exporter/config" "github.com/grafana/agent/component" "github.com/grafana/agent/component/prometheus/exporter" "github.com/grafana/agent/pkg/integrations" "github.com/grafana/agent/pkg/integrations/mssql" + "github.com/grafana/agent/pkg/util" "github.com/grafana/river/rivertypes" config_util "github.com/prometheus/common/config" + "gopkg.in/yaml.v2" ) func init() { @@ -36,10 +40,11 @@ var DefaultArguments = Arguments{ // Arguments controls the mssql exporter. type Arguments struct { - ConnectionString rivertypes.Secret `river:"connection_string,attr"` - MaxIdleConnections int `river:"max_idle_connections,attr,optional"` - MaxOpenConnections int `river:"max_open_connections,attr,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` + ConnectionString rivertypes.Secret `river:"connection_string,attr"` + MaxIdleConnections int `river:"max_idle_connections,attr,optional"` + MaxOpenConnections int `river:"max_open_connections,attr,optional"` + Timeout time.Duration `river:"timeout,attr,optional"` + QueryConfig rivertypes.OptionalSecret `river:"query_config,attr,optional"` } // SetToDefault implements river.Defaulter. 
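As a rough sketch of how the new `query_config` attribute is used from a Flow configuration (the component label and the `local.file` reference below are hypothetical, for illustration only):

```river
prometheus.exporter.mssql "example" {
  connection_string = "sqlserver://user:pass@localhost:1433"

  // query_config takes the collector definition as an inline YAML string.
  // In practice it is often wired from another component's exports, for
  // example: query_config = local.file.queries.content
  query_config = "collector_name: mssql_standard\nmetrics:\n- metric_name: mssql_local_time_seconds\n  type: gauge\n  help: 'Local time in seconds since epoch (Unix time).'\n  values: [unix_time]\n  query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\""
}
```

The string is parsed with `yaml.UnmarshalStrict` in `Validate` below, so malformed YAML and unknown fields are rejected at config load time.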
@@ -60,6 +65,13 @@ func (a *Arguments) Validate() error { if a.Timeout <= 0 { return errors.New("timeout must be positive") } + + var collectorConfig config.CollectorConfig + err := yaml.UnmarshalStrict([]byte(a.QueryConfig.Value), &collectorConfig) + if err != nil { + return fmt.Errorf("invalid query_config: %s", err) + } + return nil } @@ -69,5 +81,6 @@ func (a *Arguments) Convert() *mssql.Config { MaxIdleConnections: a.MaxIdleConnections, MaxOpenConnections: a.MaxOpenConnections, Timeout: a.Timeout, + QueryConfig: util.RawYAML(a.QueryConfig.Value), } } diff --git a/component/prometheus/exporter/mssql/mssql_test.go b/component/prometheus/exporter/mssql/mssql_test.go index b9a47ad3b776..4fad4a819780 100644 --- a/component/prometheus/exporter/mssql/mssql_test.go +++ b/component/prometheus/exporter/mssql/mssql_test.go @@ -4,11 +4,13 @@ import ( "testing" "time" + "github.com/burningalchemist/sql_exporter/config" "github.com/grafana/agent/pkg/integrations/mssql" "github.com/grafana/river" "github.com/grafana/river/rivertypes" config_util "github.com/prometheus/common/config" "github.com/stretchr/testify/require" + "gopkg.in/yaml.v2" ) func TestRiverUnmarshal(t *testing.T) { @@ -16,8 +18,7 @@ func TestRiverUnmarshal(t *testing.T) { connection_string = "sqlserver://user:pass@localhost:1433" max_idle_connections = 3 max_open_connections = 3 - timeout = "10s" - ` + timeout = "10s"` var args Arguments err := river.Unmarshal([]byte(riverConfig), &args) @@ -33,6 +34,64 @@ func TestRiverUnmarshal(t *testing.T) { require.Equal(t, expected, args) } +func TestRiverUnmarshalWithInlineQueryConfig(t *testing.T) { + riverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 3 + max_open_connections = 3 + timeout = "10s" + query_config = "{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\" } ] }"` + + var args Arguments + err := river.Unmarshal([]byte(riverConfig), &args) + require.NoError(t, err) + var collectorConfig config.CollectorConfig + err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) + require.NoError(t, err) + + require.Equal(t, rivertypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) + require.Equal(t, 3, args.MaxIdleConnections) + require.Equal(t, 3, args.MaxOpenConnections) + require.Equal(t, 10*time.Second, args.Timeout) + require.Equal(t, "mssql_standard", collectorConfig.Name) + require.Equal(t, 1, len(collectorConfig.Metrics)) + require.Equal(t, "mssql_local_time_seconds", collectorConfig.Metrics[0].Name) + require.Equal(t, "gauge", collectorConfig.Metrics[0].TypeString) + require.Equal(t, "Local time in seconds since epoch (Unix time).", collectorConfig.Metrics[0].Help) + require.Equal(t, 1, len(collectorConfig.Metrics[0].Values)) + require.Contains(t, collectorConfig.Metrics[0].Values, "unix_time") + require.Equal(t, "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time", collectorConfig.Metrics[0].QueryLiteral) +} + +func TestRiverUnmarshalWithInlineQueryConfigYaml(t *testing.T) { + riverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 3 + max_open_connections = 3 + timeout = "10s" + query_config = "collector_name: mssql_standard\nmetrics:\n- metric_name: mssql_local_time_seconds\n type: gauge\n help: 'Local time in seconds since epoch 
(Unix time).'\n values: [unix_time]\n query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\""` + + var args Arguments + err := river.Unmarshal([]byte(riverConfig), &args) + require.NoError(t, err) + var collectorConfig config.CollectorConfig + err = yaml.UnmarshalStrict([]byte(args.QueryConfig.Value), &collectorConfig) + require.NoError(t, err) + + require.Equal(t, rivertypes.Secret("sqlserver://user:pass@localhost:1433"), args.ConnectionString) + require.Equal(t, 3, args.MaxIdleConnections) + require.Equal(t, 3, args.MaxOpenConnections) + require.Equal(t, 10*time.Second, args.Timeout) + require.Equal(t, "mssql_standard", collectorConfig.Name) + require.Equal(t, 1, len(collectorConfig.Metrics)) + require.Equal(t, "mssql_local_time_seconds", collectorConfig.Metrics[0].Name) + require.Equal(t, "gauge", collectorConfig.Metrics[0].TypeString) + require.Equal(t, "Local time in seconds since epoch (Unix time).", collectorConfig.Metrics[0].Help) + require.Equal(t, 1, len(collectorConfig.Metrics[0].Values)) + require.Contains(t, collectorConfig.Metrics[0].Values, "unix_time") + require.Equal(t, "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time", collectorConfig.Metrics[0].QueryLiteral) +} + func TestUnmarshalInvalid(t *testing.T) { invalidRiverConfig := ` connection_string = "sqlserver://user:pass@localhost:1433" @@ -44,6 +103,37 @@ func TestUnmarshalInvalid(t *testing.T) { var invalidArgs Arguments err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) require.Error(t, err) + require.EqualError(t, err, "timeout must be positive") +} + +func TestUnmarshalInvalidQueryConfigYaml(t *testing.T) { + invalidRiverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 1 + max_open_connections = 1 + timeout = "1s" + query_config = "{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\" }" + ` + + var invalidArgs Arguments + err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + require.Error(t, err) + require.EqualError(t, err, "invalid query_config: yaml: line 1: did not find expected ',' or ']'") +} + +func TestUnmarshalInvalidProperty(t *testing.T) { + invalidRiverConfig := ` + connection_string = "sqlserver://user:pass@localhost:1433" + max_idle_connections = 1 + max_open_connections = 1 + timeout = "1s" + query_config = "collector_name: mssql_standard\nbad_param: true\nmetrics:\n- metric_name: mssql_local_time_seconds\n type: gauge\n help: 'Local time in seconds since epoch (Unix time).'\n values: [unix_time]\n query: \"SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time\"" + ` + + var invalidArgs Arguments + err := river.Unmarshal([]byte(invalidRiverConfig), &invalidArgs) + require.Error(t, err) + require.EqualError(t, err, "invalid query_config: unknown fields in collector: bad_param") } func TestArgumentsValidate(t *testing.T) { @@ -89,6 +179,9 @@ func TestArgumentsValidate(t *testing.T) { MaxIdleConnections: 1, MaxOpenConnections: 1, Timeout: 10 * time.Second, + QueryConfig: rivertypes.OptionalSecret{ + Value: `{ collector_name: mssql_standard, metrics: [ { metric_name: mssql_local_time_seconds, type: gauge, help: 'Local time in seconds since epoch (Unix time).', values: [ unix_time ], query: "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time" } ] }`, + }, }, wantErr: false, }, @@ 
-107,20 +200,31 @@ func TestArgumentsValidate(t *testing.T) { } func TestConvert(t *testing.T) { - riverConfig := ` - connection_string = "sqlserver://user:pass@localhost:1433" - ` - var args Arguments - err := river.Unmarshal([]byte(riverConfig), &args) - require.NoError(t, err) + strQueryConfig := `collector_name: mssql_standard +metrics: +- metric_name: mssql_local_time_seconds + type: gauge + help: 'Local time in seconds since epoch (Unix time).' + values: [unix_time] + query: "SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time"` + args := Arguments{ + ConnectionString: rivertypes.Secret("sqlserver://user:pass@localhost:1433"), + MaxIdleConnections: 1, + MaxOpenConnections: 1, + Timeout: 10 * time.Second, + QueryConfig: rivertypes.OptionalSecret{ + Value: strQueryConfig, + }, + } res := args.Convert() expected := mssql.Config{ ConnectionString: config_util.Secret("sqlserver://user:pass@localhost:1433"), - MaxIdleConnections: DefaultArguments.MaxIdleConnections, - MaxOpenConnections: DefaultArguments.MaxOpenConnections, - Timeout: DefaultArguments.Timeout, + MaxIdleConnections: 1, + MaxOpenConnections: 1, + Timeout: 10 * time.Second, + QueryConfig: []byte(strQueryConfig), } require.Equal(t, expected, *res) } diff --git a/component/prometheus/operator/common/crdmanager.go b/component/prometheus/operator/common/crdmanager.go index 9f8bd55f79f6..85f13719e970 100644 --- a/component/prometheus/operator/common/crdmanager.go +++ b/component/prometheus/operator/common/crdmanager.go @@ -42,12 +42,19 @@ const informerSyncTimeout = 10 * time.Second // crdManager is all of the fields required to run a crd based component. // on update, this entire thing should be recreated and restarted type crdManager struct { - mut sync.Mutex - discoveryConfigs map[string]discovery.Configs - scrapeConfigs map[string]*config.ScrapeConfig - debugInfo map[string]*operator.DiscoveredResource - discoveryManager *discovery.Manager - scrapeManager *scrape.Manager + mut sync.Mutex + + // these maps are keyed by job name + discoveryConfigs map[string]discovery.Configs + scrapeConfigs map[string]*config.ScrapeConfig + + // list of keys to the above maps for a given resource by `ns/name` + crdsToMapKeys map[string][]string + // debug info by `kind/ns/name` + debugInfo map[string]*operator.DiscoveredResource + + discoveryManager discoveryManager + scrapeManager scrapeManager clusteringUpdated chan struct{} ls labelstore.LabelStore @@ -80,6 +87,7 @@ func newCrdManager(opts component.Options, cluster cluster.Cluster, logger log.L cluster: cluster, discoveryConfigs: map[string]discovery.Configs{}, scrapeConfigs: map[string]*config.ScrapeConfig{}, + crdsToMapKeys: map[string][]string{}, debugInfo: map[string]*operator.DiscoveredResource{}, kind: kind, clusteringUpdated: make(chan struct{}, 1), @@ -392,6 +400,7 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { AdditionalRelabelConfigs: c.args.RelabelConfigs, ScrapeOptions: c.args.Scrape, } + mapKeys := []string{} for i, ep := range pm.Spec.PodMetricsEndpoints { var scrapeConfig *config.ScrapeConfig scrapeConfig, err = gen.GeneratePodMonitorConfig(pm, ep, i) @@ -400,6 +409,7 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { level.Error(c.logger).Log("name", pm.Name, "err", err, "msg", "error generating scrapeconfig from podmonitor") break } + mapKeys = append(mapKeys, scrapeConfig.JobName) c.mut.Lock() c.discoveryConfigs[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs c.scrapeConfigs[scrapeConfig.JobName] = scrapeConfig @@ 
-409,6 +419,9 @@ func (c *crdManager) addPodMonitor(pm *promopv1.PodMonitor) { c.addDebugInfo(pm.Namespace, pm.Name, err) return } + c.mut.Lock() + c.crdsToMapKeys[fmt.Sprintf("%s/%s", pm.Namespace, pm.Name)] = mapKeys + c.mut.Unlock() if err = c.apply(); err != nil { level.Error(c.logger).Log("name", pm.Name, "err", err, "msg", "error applying scrape configs from "+c.kind) } @@ -442,6 +455,8 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { AdditionalRelabelConfigs: c.args.RelabelConfigs, ScrapeOptions: c.args.Scrape, } + + mapKeys := []string{} for i, ep := range sm.Spec.Endpoints { var scrapeConfig *config.ScrapeConfig scrapeConfig, err = gen.GenerateServiceMonitorConfig(sm, ep, i) @@ -450,6 +465,7 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error generating scrapeconfig from serviceMonitor") break } + mapKeys = append(mapKeys, scrapeConfig.JobName) c.mut.Lock() c.discoveryConfigs[scrapeConfig.JobName] = scrapeConfig.ServiceDiscoveryConfigs c.scrapeConfigs[scrapeConfig.JobName] = scrapeConfig @@ -459,6 +475,9 @@ func (c *crdManager) addServiceMonitor(sm *promopv1.ServiceMonitor) { c.addDebugInfo(sm.Namespace, sm.Name, err) return } + c.mut.Lock() + c.crdsToMapKeys[fmt.Sprintf("%s/%s", sm.Namespace, sm.Name)] = mapKeys + c.mut.Unlock() if err = c.apply(); err != nil { level.Error(c.logger).Log("name", sm.Name, "err", err, "msg", "error applying scrape configs from "+c.kind) } @@ -503,6 +522,7 @@ func (c *crdManager) addProbe(p *promopv1.Probe) { c.mut.Lock() c.discoveryConfigs[pmc.JobName] = pmc.ServiceDiscoveryConfigs c.scrapeConfigs[pmc.JobName] = pmc + c.crdsToMapKeys[fmt.Sprintf("%s/%s", p.Namespace, p.Name)] = []string{pmc.JobName} c.mut.Unlock() if err = c.apply(); err != nil { @@ -533,12 +553,10 @@ func (c *crdManager) onDeleteProbe(obj interface{}) { func (c *crdManager) clearConfigs(ns, name string) { c.mut.Lock() defer c.mut.Unlock() - prefix := fmt.Sprintf("%s/%s/%s", c.kind, ns, name) - for k := range c.discoveryConfigs { - if strings.HasPrefix(k, prefix) { - delete(c.discoveryConfigs, k) - delete(c.scrapeConfigs, k) - } + + for _, k := range c.crdsToMapKeys[fmt.Sprintf("%s/%s", ns, name)] { + delete(c.discoveryConfigs, k) + delete(c.scrapeConfigs, k) } - delete(c.debugInfo, prefix) + delete(c.debugInfo, fmt.Sprintf("%s/%s/%s", c.kind, ns, name)) } diff --git a/component/prometheus/operator/common/crdmanager_test.go b/component/prometheus/operator/common/crdmanager_test.go new file mode 100644 index 000000000000..7e3cd75fbd37 --- /dev/null +++ b/component/prometheus/operator/common/crdmanager_test.go @@ -0,0 +1,168 @@ +package common + +import ( + "testing" + + "golang.org/x/exp/maps" + + "github.com/go-kit/log" + "github.com/grafana/agent/component" + "github.com/grafana/agent/component/prometheus/operator" + "github.com/grafana/agent/service/cluster" + "github.com/grafana/agent/service/labelstore" + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/scrape" + "k8s.io/apimachinery/pkg/util/intstr" + + promopv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/stretchr/testify/require" +) + +func TestClearConfigsSameNsSamePrefix(t *testing.T) { + logger := log.NewNopLogger() + m := newCrdManager( + component.Options{ + Logger: logger, + 
GetServiceData: func(name string) (interface{}, error) { return nil, nil }, + }, + cluster.Mock(), + logger, + &operator.DefaultArguments, + KindServiceMonitor, + labelstore.New(logger), + ) + + m.discoveryManager = newMockDiscoveryManager() + m.scrapeManager = newMockScrapeManager() + + targetPort := intstr.FromInt(9090) + m.onAddServiceMonitor(&promopv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "svcmonitor", + }, + Spec: promopv1.ServiceMonitorSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "group": "my-group", + }, + }, + Endpoints: []promopv1.Endpoint{ + { + TargetPort: &targetPort, + ScrapeTimeout: "5s", + Interval: "10s", + }, + }, + }, + }) + m.onAddServiceMonitor(&promopv1.ServiceMonitor{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "svcmonitor-another", + }, + Spec: promopv1.ServiceMonitorSpec{ + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "group": "my-group", + }, + }, + Endpoints: []promopv1.Endpoint{ + { + TargetPort: &targetPort, + ScrapeTimeout: "5s", + Interval: "10s", + }, + }, + }}) + + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another/0", "serviceMonitor/monitoring/svcmonitor/0"}, maps.Keys(m.discoveryConfigs)) + m.clearConfigs("monitoring", "svcmonitor") + require.ElementsMatch(t, []string{"monitoring/svcmonitor", "monitoring/svcmonitor-another"}, maps.Keys(m.crdsToMapKeys)) + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another/0"}, maps.Keys(m.discoveryConfigs)) + require.ElementsMatch(t, []string{"serviceMonitor/monitoring/svcmonitor-another"}, maps.Keys(m.debugInfo)) +} + +func TestClearConfigsProbe(t *testing.T) { + logger := log.NewNopLogger() + m := newCrdManager( + component.Options{ + Logger: logger, + GetServiceData: func(name string) (interface{}, error) { return nil, nil }, + }, + cluster.Mock(), + logger, + &operator.DefaultArguments, + KindProbe, + labelstore.New(logger), + ) + + m.discoveryManager = newMockDiscoveryManager() + m.scrapeManager = newMockScrapeManager() + + m.onAddProbe(&promopv1.Probe{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "probe", + }, + Spec: promopv1.ProbeSpec{}, + }) + m.onAddProbe(&promopv1.Probe{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "monitoring", + Name: "probe-another", + }, + Spec: promopv1.ProbeSpec{}}) + + require.ElementsMatch(t, []string{"probe/monitoring/probe-another", "probe/monitoring/probe"}, maps.Keys(m.discoveryConfigs)) + m.clearConfigs("monitoring", "probe") + require.ElementsMatch(t, []string{"monitoring/probe", "monitoring/probe-another"}, maps.Keys(m.crdsToMapKeys)) + require.ElementsMatch(t, []string{"probe/monitoring/probe-another"}, maps.Keys(m.discoveryConfigs)) + require.ElementsMatch(t, []string{"probe/monitoring/probe-another"}, maps.Keys(m.debugInfo)) +} + +type mockDiscoveryManager struct { +} + +func newMockDiscoveryManager() *mockDiscoveryManager { + return &mockDiscoveryManager{} +} + +func (m *mockDiscoveryManager) Run() error { + return nil +} + +func (m *mockDiscoveryManager) SyncCh() <-chan map[string][]*targetgroup.Group { + return nil +} + +func (m *mockDiscoveryManager) ApplyConfig(cfg map[string]discovery.Configs) error { + return nil +} + +type mockScrapeManager struct { +} + +func newMockScrapeManager() *mockScrapeManager { + return &mockScrapeManager{} +} + +func (m *mockScrapeManager) Run(tsets <-chan map[string][]*targetgroup.Group) error { + return nil +} + +func (m 
*mockScrapeManager) Stop() { + +} + +func (m *mockScrapeManager) TargetsActive() map[string][]*scrape.Target { + return nil +} + +func (m *mockScrapeManager) ApplyConfig(cfg *config.Config) error { + return nil +} diff --git a/component/prometheus/operator/common/interfaces.go b/component/prometheus/operator/common/interfaces.go new file mode 100644 index 000000000000..4652154f6dc6 --- /dev/null +++ b/component/prometheus/operator/common/interfaces.go @@ -0,0 +1,23 @@ +package common + +import ( + "github.com/prometheus/prometheus/config" + "github.com/prometheus/prometheus/discovery" + "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/scrape" +) + +// discoveryManager is an interface around discovery.Manager +type discoveryManager interface { + Run() error + SyncCh() <-chan map[string][]*targetgroup.Group + ApplyConfig(cfg map[string]discovery.Configs) error +} + +// scrapeManager is an interface around scrape.Manager +type scrapeManager interface { + Run(tsets <-chan map[string][]*targetgroup.Group) error + Stop() + TargetsActive() map[string][]*scrape.Target + ApplyConfig(cfg *config.Config) error +} diff --git a/converter/internal/promtailconvert/internal/build/scrape_builder.go b/converter/internal/promtailconvert/internal/build/scrape_builder.go index fc26d29cc832..c7288be0fc01 100644 --- a/converter/internal/promtailconvert/internal/build/scrape_builder.go +++ b/converter/internal/promtailconvert/internal/build/scrape_builder.go @@ -103,6 +103,11 @@ func (s *ScrapeConfigBuilder) getOrNewLokiRelabel() string { args := lokirelabel.Arguments{ ForwardTo: s.getOrNewProcessStageReceivers(), RelabelConfigs: component.ToFlowRelabelConfigs(s.cfg.RelabelConfigs), + // max_cache_size doesn't exist in static, and we need to manually set it to the default. + // Since the default is 10_000, if we didn't set the value the converter would compare the default 10k to 0 and emit 0. + // We actually don't want to emit anything since this setting doesn't exist in static; setting it to 10k matches the default + // and ensures it doesn't get emitted.
+ MaxCacheSize: lokirelabel.DefaultArguments.MaxCacheSize, } compLabel := common.LabelForParts(s.globalCtx.LabelPrefix, s.cfg.JobName) s.f.Body().AppendBlock(common.NewBlockWithOverride([]string{"loki", "relabel"}, compLabel, args)) diff --git a/converter/internal/promtailconvert/testdata/cloudflare_relabel.river b/converter/internal/promtailconvert/testdata/cloudflare_relabel.river index 201ce8f30356..014d812eab61 100644 --- a/converter/internal/promtailconvert/testdata/cloudflare_relabel.river +++ b/converter/internal/promtailconvert/testdata/cloudflare_relabel.river @@ -5,7 +5,6 @@ loki.relabel "fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.cloudflare "fun" { diff --git a/converter/internal/promtailconvert/testdata/windowsevents_relabel.river b/converter/internal/promtailconvert/testdata/windowsevents_relabel.river index 6fa1b693dc1a..39d28dea7a67 100644 --- a/converter/internal/promtailconvert/testdata/windowsevents_relabel.river +++ b/converter/internal/promtailconvert/testdata/windowsevents_relabel.river @@ -5,7 +5,6 @@ loki.relabel "fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.windowsevent "fun" { diff --git a/converter/internal/staticconvert/testdata/promtail_scrape.river b/converter/internal/staticconvert/testdata/promtail_scrape.river index 46efa90bcc9f..22ee8576ed96 100644 --- a/converter/internal/staticconvert/testdata/promtail_scrape.river +++ b/converter/internal/staticconvert/testdata/promtail_scrape.river @@ -5,7 +5,6 @@ loki.relabel "logs_log_config_fun" { source_labels = ["__trail__"] target_label = "__path__" } - max_cache_size = 0 } loki.source.cloudflare "logs_log_config_fun" { diff --git a/converter/internal/staticconvert/testdata/sanitize.river b/converter/internal/staticconvert/testdata/sanitize.river index 38dabad4a85c..1bf214eda874 100644 --- a/converter/internal/staticconvert/testdata/sanitize.river +++ b/converter/internal/staticconvert/testdata/sanitize.river @@ -37,7 +37,6 @@ loki.relabel "logs_integrations_integrations_windows_exporter_application" { source_labels = ["computer"] target_label = "agent_hostname" } - max_cache_size = 0 } loki.source.windowsevent "logs_integrations_integrations_windows_exporter_application" { @@ -75,7 +74,6 @@ loki.relabel "logs_integrations_integrations_windows_exporter_system" { source_labels = ["computer"] target_label = "agent_hostname" } - max_cache_size = 0 } loki.source.windowsevent "logs_integrations_integrations_windows_exporter_system" { diff --git a/docs/sources/flow/getting-started/migrating-from-operator.md b/docs/sources/flow/getting-started/migrating-from-operator.md index 1496a8ae5658..c017651cd46d 100644 --- a/docs/sources/flow/getting-started/migrating-from-operator.md +++ b/docs/sources/flow/getting-started/migrating-from-operator.md @@ -34,7 +34,7 @@ This guide will provide some steps to get started with Grafana Agent for users c enabled: true controller: type: 'statefulset' - replicas: 2 + replicas: 2 crds: create: false ``` diff --git a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md index 8c8f81d133d1..584b30bd97e4 100644 --- a/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md +++ b/docs/sources/flow/reference/components/otelcol.exporter.prometheus.md @@ -46,6 +46,7 @@ Name | Type | Description | Defaul `add_metric_suffixes` | `boolean` | Whether to add type and unit suffixes to metrics 
names. | `true` | no `gc_frequency` | `duration` | How often to clean up stale metrics from memory. | `"5m"` | no `forward_to` | `list(receiver)` | Where to forward converted Prometheus metrics. | | yes +`resource_to_telemetry_conversion` | `boolean` | Whether to convert OTel resource attributes to Prometheus labels. | `false` | no By default, OpenTelemetry resources are converted into `target_info` metrics. OpenTelemetry instrumentation scopes are converted into `otel_scope_info` diff --git a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md index 84786ee074a0..93fb305f8a5d 100644 --- a/docs/sources/flow/reference/components/prometheus.exporter.mssql.md +++ b/docs/sources/flow/reference/components/prometheus.exporter.mssql.md @@ -12,7 +12,8 @@ title: prometheus.exporter.mssql # prometheus.exporter.mssql The `prometheus.exporter.mssql` component embeds -[sql_exporter](https://github.com/burningalchemist/sql_exporter) for collecting stats from a Microsoft SQL Server. +[sql_exporter](https://github.com/burningalchemist/sql_exporter) for collecting stats from a Microsoft SQL Server and exposing them as +Prometheus metrics. ## Usage @@ -27,12 +28,13 @@ prometheus.exporter.mssql "LABEL" { The following arguments can be used to configure the exporter's behavior. Omitted fields take their default values. -| Name | Type | Description | Default | Required | - | ---------------------- | ---------- | ----------------------------------------------------------------- | ------- | -------- | -| `connection_string` | `secret` | The connection string used to connect to an Microsoft SQL Server. | | yes | -| `max_idle_connections` | `int` | Maximum number of idle connections to any one target. | `3` | no | -| `max_open_connections` | `int` | Maximum number of open connections to any one target. | `3` | no | -| `timeout` | `duration` | The query timeout in seconds. | `"10s"` | no | +| Name | Type | Description | Default | Required | +| ---------------------- | ---------- | ------------------------------------------------------------------- | ------- | -------- | +| `connection_string` | `secret` | The connection string used to connect to a Microsoft SQL Server. | | yes | +| `max_idle_connections` | `int` | Maximum number of idle connections to any one target. | `3` | no | +| `max_open_connections` | `int` | Maximum number of open connections to any one target. | `3` | no | +| `timeout` | `duration` | The query timeout in seconds. | `"10s"` | no | +| `query_config` | `string` | MSSQL query to Prometheus metric configuration as an inline string. | | no | [The sql_exporter examples](https://github.com/burningalchemist/sql_exporter/blob/master/examples/azure-sql-mi/sql_exporter.yml#L21) show the format of the `connection_string` argument: @@ -40,6 +42,15 @@ Omitted fields take their default values. sqlserver://USERNAME_HERE:PASSWORD_HERE@SQLMI_HERE_ENDPOINT.database.windows.net:1433?encrypt=true&hostNameInCertificate=%2A.SQL_MI_DOMAIN_HERE.database.windows.net&trustservercertificate=true ``` +If specified, the `query_config` argument must be a YAML document, passed as a string, that defines which MSSQL queries map to custom Prometheus metrics. +`query_config` is typically loaded by using the exports of another component.
For example, + +- `local.file.LABEL.content` +- `remote.http.LABEL.content` +- `remote.s3.LABEL.content` + +See [sql_exporter](https://github.com/burningalchemist/sql_exporter#collectors) for details on how to create a configuration. + ## Blocks The `prometheus.exporter.mssql` component does not support any blocks, and is configured @@ -100,3 +111,222 @@ Replace the following: - `PASSWORD`: The password to use for authentication to the remote_write API. [scrape]: {{< relref "./prometheus.scrape.md" >}} + +## Custom metrics +You can use the optional `query_config` parameter to retrieve custom Prometheus metrics for a MSSQL instance. + +If this is defined, the new configuration will be used to query your MSSQL instance and create whatever Prometheus metrics are defined. +If you want additional metrics on top of the default metrics, the default configuration must be used as a base. + +The default configuration used by this integration is as follows: +``` +collector_name: mssql_standard + +metrics: + - metric_name: mssql_local_time_seconds + type: gauge + help: 'Local time in seconds since epoch (Unix time).' + values: [unix_time] + query: | + SELECT DATEDIFF(second, '19700101', GETUTCDATE()) AS unix_time + - metric_name: mssql_connections + type: gauge + help: 'Number of active connections.' + key_labels: + - db + values: [count] + query: | + SELECT DB_NAME(sp.dbid) AS db, COUNT(sp.spid) AS count + FROM sys.sysprocesses sp + GROUP BY DB_NAME(sp.dbid) + # + # Collected from sys.dm_os_performance_counters + # + - metric_name: mssql_deadlocks_total + type: counter + help: 'Number of lock requests that resulted in a deadlock.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Number of Deadlocks/sec' AND instance_name = '_Total' + - metric_name: mssql_user_errors_total + type: counter + help: 'Number of user errors.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Errors/sec' AND instance_name = 'User Errors' + - metric_name: mssql_kill_connection_errors_total + type: counter + help: 'Number of severe errors that caused SQL Server to kill the connection.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Errors/sec' AND instance_name = 'Kill Connection Errors' + - metric_name: mssql_page_life_expectancy_seconds + type: gauge + help: 'The minimum number of seconds a page will stay in the buffer pool on this node without references.' + values: [cntr_value] + query: | + SELECT top(1) cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Page life expectancy' + - metric_name: mssql_batch_requests_total + type: counter + help: 'Number of command batches received.' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Batch Requests/sec' + - metric_name: mssql_log_growths_total + type: counter + help: 'Number of times the transaction log has been expanded, per database.' 
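+    # The query below aliases rtrim(instance_name) to `db`; key_labels turns
+    # that column into a Prometheus label on each resulting sample.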
+ key_labels: + - db + values: [cntr_value] + query: | + SELECT rtrim(instance_name) AS db, cntr_value + FROM sys.dm_os_performance_counters WITH (NOLOCK) + WHERE counter_name = 'Log Growths' AND instance_name <> '_Total' + - metric_name: mssql_buffer_cache_hit_ratio + type: gauge + help: 'Ratio of requests that hit the buffer cache' + values: [BufferCacheHitRatio] + query: | + SELECT (a.cntr_value * 1.0 / b.cntr_value) * 100.0 as BufferCacheHitRatio + FROM sys.dm_os_performance_counters a + JOIN (SELECT cntr_value, OBJECT_NAME + FROM sys.dm_os_performance_counters + WHERE counter_name = 'Buffer cache hit ratio base' + AND OBJECT_NAME = 'SQLServer:Buffer Manager') b ON a.OBJECT_NAME = b.OBJECT_NAME + WHERE a.counter_name = 'Buffer cache hit ratio' + AND a.OBJECT_NAME = 'SQLServer:Buffer Manager' + + - metric_name: mssql_checkpoint_pages_sec + type: gauge + help: 'Checkpoint Pages Per Second' + values: [cntr_value] + query: | + SELECT cntr_value + FROM sys.dm_os_performance_counters + WHERE [counter_name] = 'Checkpoint pages/sec' + # + # Collected from sys.dm_io_virtual_file_stats + # + - metric_name: mssql_io_stall_seconds_total + type: counter + help: 'Stall time in seconds per database and I/O operation.' + key_labels: + - db + value_label: operation + values: + - read + - write + query_ref: mssql_io_stall + + # + # Collected from sys.dm_os_process_memory + # + - metric_name: mssql_resident_memory_bytes + type: gauge + help: 'SQL Server resident memory size (AKA working set).' + values: [resident_memory_bytes] + query_ref: mssql_process_memory + + - metric_name: mssql_virtual_memory_bytes + type: gauge + help: 'SQL Server committed virtual memory size.' + values: [virtual_memory_bytes] + query_ref: mssql_process_memory + + - metric_name: mssql_available_commit_memory_bytes + type: gauge + help: 'SQL Server available to be committed memory size.' + values: [available_commit_limit_bytes] + query_ref: mssql_process_memory + + - metric_name: mssql_memory_utilization_percentage + type: gauge + help: 'The percentage of committed memory that is in the working set.' + values: [memory_utilization_percentage] + query_ref: mssql_process_memory + + - metric_name: mssql_page_fault_count_total + type: counter + help: 'The number of page faults that were incurred by the SQL Server process.' + values: [page_fault_count] + query_ref: mssql_process_memory + + # + # Collected from sys.dm_os_sys_info + # + - metric_name: mssql_server_total_memory_bytes + type: gauge + help: 'SQL Server committed memory in the memory manager.' + values: [committed_memory_bytes] + query_ref: mssql_os_sys_info + + - metric_name: mssql_server_target_memory_bytes + type: gauge + help: 'SQL Server target committed memory set for the memory manager.' + values: [committed_memory_target_bytes] + query_ref: mssql_os_sys_info + + # + # Collected from sys.dm_os_sys_memory + # + - metric_name: mssql_os_memory + type: gauge + help: 'OS physical memory, used and available.' + value_label: 'state' + values: [used, available] + query: | + SELECT + (total_physical_memory_kb - available_physical_memory_kb) * 1024 AS used, + available_physical_memory_kb * 1024 AS available + FROM sys.dm_os_sys_memory + - metric_name: mssql_os_page_file + type: gauge + help: 'OS page file, used and available.' 
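+    # value_label and values fan the `used` and `available` columns out into
+    # two samples of the same metric, distinguished by the `state` label.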
+    value_label: 'state'
+    values: [used, available]
+    query: |
+      SELECT
+        (total_page_file_kb - available_page_file_kb) * 1024 AS used,
+        available_page_file_kb * 1024 AS available
+      FROM sys.dm_os_sys_memory
+queries:
+  # Populates `mssql_io_stall_seconds_total`.
+  - query_name: mssql_io_stall
+    query: |
+      SELECT
+        cast(DB_Name(a.database_id) as varchar) AS [db],
+        sum(io_stall_read_ms) / 1000.0 AS [read],
+        sum(io_stall_write_ms) / 1000.0 AS [write]
+      FROM
+        sys.dm_io_virtual_file_stats(null, null) a
+      INNER JOIN sys.master_files b ON a.database_id = b.database_id AND a.file_id = b.file_id
+      GROUP BY a.database_id
+  # Populates `mssql_resident_memory_bytes`, `mssql_virtual_memory_bytes`,
+  # `mssql_available_commit_memory_bytes`, `mssql_memory_utilization_percentage`,
+  # and `mssql_page_fault_count_total`.
+  - query_name: mssql_process_memory
+    query: |
+      SELECT
+        physical_memory_in_use_kb * 1024 AS resident_memory_bytes,
+        virtual_address_space_committed_kb * 1024 AS virtual_memory_bytes,
+        available_commit_limit_kb * 1024 AS available_commit_limit_bytes,
+        memory_utilization_percentage,
+        page_fault_count
+      FROM sys.dm_os_process_memory
+  # Populates `mssql_server_total_memory_bytes` and `mssql_server_target_memory_bytes`.
+  - query_name: mssql_os_sys_info
+    query: |
+      SELECT
+        committed_kb * 1024 AS committed_memory_bytes,
+        committed_target_kb * 1024 AS committed_memory_target_bytes
+      FROM sys.dm_os_sys_info
+```
diff --git a/docs/sources/flow/reference/components/prometheus.exporter.windows.md b/docs/sources/flow/reference/components/prometheus.exporter.windows.md
index f06aba58eaa2..f0fab521a83e 100644
--- a/docs/sources/flow/reference/components/prometheus.exporter.windows.md
+++ b/docs/sources/flow/reference/components/prometheus.exporter.windows.md
@@ -271,6 +271,11 @@ Name | Description | Enabled by default
 See the linked documentation on each collector for more information on reported metrics, configuration settings and usage examples.

+{{% admonition type="caution" %}}
+Certain collectors cause Grafana Agent to crash if they are enabled and the required infrastructure isn't installed.
+These include, but aren't limited to, `mscluster_*`, `vmware`, `nps`, `dns`, `msmq`, `teradici_pcoip`, `ad`, `hyperv`, and `scheduled_task`.
+{{% /admonition %}}
+
 ## Example

 This example uses a [`prometheus.scrape` component][scrape] to collect metrics
diff --git a/docs/sources/flow/reference/components/prometheus.remote_write.md b/docs/sources/flow/reference/components/prometheus.remote_write.md
index ca8ab98051c2..64b3efd3bc26 100644
--- a/docs/sources/flow/reference/components/prometheus.remote_write.md
+++ b/docs/sources/flow/reference/components/prometheus.remote_write.md
@@ -374,7 +374,7 @@ prometheus.remote_write "staging" {
     url = "http://mimir:9009/api/v1/push"

     headers = {
-      "X-Scope-OrgID" = "staging"
+      "X-Scope-OrgID" = "staging",
     }
   }
 }
diff --git a/docs/sources/operator/helm-getting-started.md b/docs/sources/operator/helm-getting-started.md
index bc2f516e10fa..78245505d859 100644
--- a/docs/sources/operator/helm-getting-started.md
+++ b/docs/sources/operator/helm-getting-started.md
@@ -20,8 +20,8 @@ In this guide, you'll learn how to deploy [Grafana Agent Operator]({{< relref ".
 To deploy Agent Operator with Helm, make sure that you have the following:

 - A Kubernetes cluster
-- The `[kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)` command-line client installed and configured on your machine
-- The `[helm](https://helm.sh/docs/intro/install/)` command-line client installed and configured on your machine
+- The [`kubectl`](https://kubernetes.io/docs/tasks/tools/#kubectl) command-line client installed and configured on your machine
+- The [`helm`](https://helm.sh/docs/intro/install/) command-line client installed and configured on your machine

 > **Note:** Agent Operator is currently in beta and its custom resources are subject to change.
diff --git a/docs/sources/shared/flow/reference/components/authorization-block.md b/docs/sources/shared/flow/reference/components/authorization-block.md
index 190cd11f8bb9..11a74326f997 100644
--- a/docs/sources/shared/flow/reference/components/authorization-block.md
+++ b/docs/sources/shared/flow/reference/components/authorization-block.md
@@ -10,11 +10,10 @@ description: Shared content, authorization block
 headless: true
 ---

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`type` | `string` | Authorization type, for example, "Bearer". | | no
-`credentials` | `secret` | Secret value. | | no
-`credentials_file` | `string` | File containing the secret value. | | no
+Name | Type | Description | Default | Required
+-------------------|----------|--------------------------------------------|---------|---------
+`credentials_file` | `string` | File containing the secret value. | | no
+`credentials` | `secret` | Secret value. | | no
+`type` | `string` | Authorization type, for example, "Bearer". | | no

-`credential` and `credentials_file` are mutually exclusive and only one can be
-provided inside of an `authorization` block.
+`credentials` and `credentials_file` are mutually exclusive, and only one can be provided inside an `authorization` block.
diff --git a/docs/sources/shared/flow/reference/components/azuread-block.md b/docs/sources/shared/flow/reference/components/azuread-block.md
index ebdf436d02fe..07d974385134 100644
--- a/docs/sources/shared/flow/reference/components/azuread-block.md
+++ b/docs/sources/shared/flow/reference/components/azuread-block.md
@@ -10,11 +10,11 @@ description: Shared content, azuread block
 headless: true
 ---

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
+Name | Type | Description | Default | Required
+--------|----------|------------------|-----------------|---------
 `cloud` | `string` | The Azure Cloud. | `"AzurePublic"` | no

 The supported values for `cloud` are:
 * `"AzurePublic"`
 * `"AzureChina"`
-* `"AzureGovernment"`
\ No newline at end of file
+* `"AzureGovernment"`
diff --git a/docs/sources/shared/flow/reference/components/basic-auth-block.md b/docs/sources/shared/flow/reference/components/basic-auth-block.md
index 06c81f660e3c..62f7e0a25d61 100644
--- a/docs/sources/shared/flow/reference/components/basic-auth-block.md
+++ b/docs/sources/shared/flow/reference/components/basic-auth-block.md
@@ -10,11 +10,10 @@ description: Shared content, basic auth block
 headless: true
 ---

-Name | Type | Description | Default | Required
---- | ---- | ----------- | ------- | --------
-`username` | `string` | Basic auth username. | | no
-`password` | `secret` | Basic auth password. | | no
-`password_file` | `string` | File containing the basic auth password. | | no
+Name | Type | Description | Default | Required
+----------------|----------|------------------------------------------|---------|---------
+`password_file` | `string` | File containing the basic auth password. | | no
+`password` | `secret` | Basic auth password. | | no
+`username` | `string` | Basic auth username. | | no

-`password` and `password_file` are mutually exclusive and only one can be
-provided inside of a `basic_auth` block.
+`password` and `password_file` are mutually exclusive, and only one can be provided inside a `basic_auth` block.
diff --git a/docs/sources/shared/flow/reference/components/exporter-component-exports.md b/docs/sources/shared/flow/reference/components/exporter-component-exports.md
index beb717a13fae..f1a8ca440cd9 100644
--- a/docs/sources/shared/flow/reference/components/exporter-component-exports.md
+++ b/docs/sources/shared/flow/reference/components/exporter-component-exports.md
@@ -13,15 +13,12 @@ headless: true
 The following fields are exported and can be referenced by other components.

 Name | Type | Description
---------- | ------------------- | -----------
+----------|---------------------|----------------------------------------------------------
 `targets` | `list(map(string))` | The targets that can be used to collect exporter metrics.

-For example, the `targets` can either be passed to a `discovery.relabel`
-component to rewrite the targets' label sets, or to a `prometheus.scrape`
-component that collects the exposed metrics.
+For example, the `targets` can either be passed to a `discovery.relabel` component to rewrite the targets' label sets or to a `prometheus.scrape` component that collects the exposed metrics.

-The exported targets will use the configured [in-memory traffic][] address
-specified by the [run command][].
+The exported targets use the configured [in-memory traffic][] address specified by the [run command][].

 [in-memory traffic]: {{< relref "../../../../flow/concepts/component_controller.md#in-memory-traffic" >}}
 [run command]: {{< relref "../../../../flow/reference/cli/run.md" >}}
diff --git a/docs/sources/shared/flow/reference/components/extract-field-block.md b/docs/sources/shared/flow/reference/components/extract-field-block.md
index 5036097d155f..207f2bc6053d 100644
--- a/docs/sources/shared/flow/reference/components/extract-field-block.md
+++ b/docs/sources/shared/flow/reference/components/extract-field-block.md
@@ -12,31 +12,28 @@ headless: true

 The following attributes are supported:

-Name | Type | Description | Default | Required
---- |----------------|----------------------------------------------------------------------------------------------------------|---------| --------
-`tag_name` | `string` | The name of the resource attribute that will be added to logs, metrics, or spans. | `""` | no
-`key` | `string` | The annotation (or label) name. This must exactly match an annotation (or label) name. | `""` | no
-`key_regex` | `string` | A regular expression used to extract a key that matches the regex. | `""` | no
-`regex` | `string` | An optional field used to extract a sub-string from a complex field value. | `""` | no
-`from` | `string` | The source of the labels or annotations. Allowed values are `pod` and `namespace`. | `pod` | no
-
-When `tag_name` is not specified, a default tag name will be used with the format:
+Name | Type | Description | Default | Required
+------------|----------|----------------------------------------------------------------------------------------|---------|---------
+`from` | `string` | The source of the labels or annotations. Allowed values are `pod` and `namespace`. | `pod` | no
+`key_regex` | `string` | A regular expression used to extract a key that matches the regular expression. | `""` | no
+`key` | `string` | The annotation or label name. This key must exactly match an annotation or label name. | `""` | no
+`regex` | `string` | An optional field used to extract a sub-string from a complex field value. | `""` | no
+`tag_name` | `string` | The name of the resource attribute added to logs, metrics, or spans. | `""` | no
+
+When you don't specify `tag_name`, a default tag name is used with the format:

 * `k8s.pod.annotations.`
 * `k8s.pod.labels.