diff --git a/internal/converter/internal/staticconvert/internal/build/builder_traces.go b/internal/converter/internal/staticconvert/internal/build/builder_traces.go
index 6762d8e580f2..da2411b9eaa3 100644
--- a/internal/converter/internal/staticconvert/internal/build/builder_traces.go
+++ b/internal/converter/internal/staticconvert/internal/build/builder_traces.go
@@ -75,13 +75,19 @@ func removeReceiver(otelCfg *otelcol.Config, pipelineType otel_component.Type, r
 	}
 	delete(otelCfg.Receivers, otel_component.NewID(receiverType))
 
-	spr := make([]otel_component.ID, 0, len(otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers)-1)
-	for _, r := range otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers {
-		if r != otel_component.NewID(receiverType) {
-			spr = append(spr, r)
+	for ix, p := range otelCfg.Service.Pipelines {
+		if ix.Type() != pipelineType {
+			continue
+		}
+
+		spr := make([]otel_component.ID, 0)
+		for _, r := range p.Receivers {
+			if r.Type() != receiverType {
+				spr = append(spr, r)
+			}
 		}
+		otelCfg.Service.Pipelines[ix].Receivers = spr
 	}
-	otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers = spr
 }
 
 // removeProcessor removes a processor from the otel config for a specific pipeline type.
@@ -91,11 +97,17 @@ func removeProcessor(otelCfg *otelcol.Config, pipelineType otel_component.Type,
 	}
 	delete(otelCfg.Processors, otel_component.NewID(processorType))
 
-	spr := make([]otel_component.ID, 0, len(otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors)-1)
-	for _, r := range otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors {
-		if r != otel_component.NewID(processorType) {
-			spr = append(spr, r)
+	for ix, p := range otelCfg.Service.Pipelines {
+		if ix.Type() != pipelineType {
+			continue
+		}
+
+		spr := make([]otel_component.ID, 0)
+		for _, r := range p.Processors {
+			if r.Type() != processorType {
+				spr = append(spr, r)
+			}
 		}
+		otelCfg.Service.Pipelines[ix].Processors = spr
 	}
-	otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors = spr
 }
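The substance of the Go change, for review context: the old helpers indexed `Service.Pipelines` with `otel_component.NewID(pipelineType)`, an ID with no instance name, so they only ever touched a pipeline keyed exactly `traces`. Once the converter emits one pipeline per static trace config (the testdata below suggests keys like `traces/_0` and `traces/_1`), that single lookup matches nothing. The new loop ranges over every pipeline, filters on `ix.Type()`, and compares receivers and processors by `Type()` instead of full-ID equality. A minimal, self-contained sketch of the difference, using stand-in types rather than the collector's real `otel_component` package:

```go
package main

import "fmt"

// ID mimics the shape of otel_component.ID (a component type plus an optional
// instance name) so the sketch stays self-contained.
type ID struct{ Typ, Name string }

func (id ID) Type() string { return id.Typ }

func main() {
	// Two trace pipelines, as produced when one static config is split in two
	// (names follow the "_0"/"_1" pattern visible in the testdata below).
	pipelines := map[ID][]ID{
		{Typ: "traces", Name: "_0"}: {{Typ: "otlp"}, {Typ: "jaeger"}},
		{Typ: "traces", Name: "_1"}: {{Typ: "otlp"}},
	}

	// Old approach: one lookup under the unnamed "traces" key finds nothing,
	// so no receiver list is ever rewritten.
	if _, ok := pipelines[ID{Typ: "traces"}]; !ok {
		fmt.Println(`no pipeline is keyed by the unnamed "traces" ID`)
	}

	// New approach, mirroring the patch: visit every pipeline whose Type
	// matches, then rebuild its receiver list without the removed type.
	for ix, receivers := range pipelines {
		if ix.Type() != "traces" {
			continue
		}
		kept := make([]ID, 0, len(receivers))
		for _, r := range receivers {
			if r.Type() != "jaeger" {
				kept = append(kept, r)
			}
		}
		pipelines[ix] = kept
	}
	fmt.Println(pipelines) // the jaeger receiver is gone from both pipelines
}
```

One small trade-off in the patch itself: replacing the pre-sized `make([]otel_component.ID, 0, len(...)-1)` with `make([]otel_component.ID, 0)` gives up the capacity hint, which is harmless at converter scale but worth noting.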
diff --git a/internal/converter/internal/staticconvert/testdata/traces.river b/internal/converter/internal/staticconvert/testdata/traces.river
index 716aefad0e3b..c99e40e4346f 100644
--- a/internal/converter/internal/staticconvert/testdata/traces.river
+++ b/internal/converter/internal/staticconvert/testdata/traces.river
@@ -1,4 +1,19 @@
-otelcol.receiver.otlp "default" {
+otelcol.extension.jaeger_remote_sampling "default_0" {
+	grpc { }
+
+	http { }
+
+	source {
+		remote {
+			endpoint          = "jaeger-collector:14250"
+			compression       = ""
+			write_buffer_size = "0B"
+		}
+		reload_interval = "30s"
+	}
+}
+
+otelcol.receiver.otlp "_0_default" {
 	grpc {
 		include_metadata = true
 	}
@@ -10,11 +25,11 @@
 	output {
 		metrics = []
 		logs    = []
-		traces  = [otelcol.processor.discovery.default.input]
+		traces  = [otelcol.processor.discovery._0_default.input]
 	}
 }
 
-discovery.azure "default_prometheus1" {
+discovery.azure "_0_default_prometheus1" {
 	subscription_id = "subscription1"
 
 	oauth {
@@ -28,17 +43,17 @@
 	}
 }
 
-discovery.lightsail "default_prometheus1" {
+discovery.lightsail "_0_default_prometheus1" {
 	region     = "us-east-1"
 	access_key = "YOUR_ACCESS_KEY"
 	secret_key = "YOUR_SECRET_KEY"
 	port       = 8080
 }
 
-discovery.relabel "default_prometheus1" {
+discovery.relabel "_0_default_prometheus1" {
 	targets = concat(
-		discovery.azure.default_prometheus1.targets,
-		discovery.lightsail.default_prometheus1.targets,
+		discovery.azure._0_default_prometheus1.targets,
+		discovery.lightsail._0_default_prometheus1.targets,
 	)
 
 	rule {
@@ -52,18 +67,19 @@
 	}
 }
 
-otelcol.processor.discovery "default" {
-	targets          = discovery.relabel.default_prometheus1.output
-	pod_associations = []
+otelcol.processor.discovery "_0_default" {
+	targets          = discovery.relabel._0_default_prometheus1.output
+	operation_type   = "update"
+	pod_associations = ["ip", "net.host.ip"]
 
 	output {
 		metrics = []
 		logs    = []
-		traces  = [otelcol.processor.attributes.default.input]
+		traces  = [otelcol.processor.attributes._0_default.input]
 	}
 }
 
-otelcol.processor.attributes "default" {
+otelcol.processor.attributes "_0_default" {
 	action {
 		key    = "db.table"
 		action = "delete"
@@ -72,11 +88,46 @@
 	output {
 		metrics = []
 		logs    = []
-		traces  = [otelcol.processor.tail_sampling.default.input]
+		traces  = [otelcol.exporter.loadbalancing._0_default.input, otelcol.exporter.logging._0_default.input]
+	}
+}
+
+otelcol.exporter.loadbalancing "_0_default" {
+	protocol {
+		otlp {
+			retry {
+				max_elapsed_time = "1m0s"
+			}
+
+			client {
+				compression = "none"
+			}
+		}
+	}
+
+	resolver {
+		static {
+			hostnames = ["tempo1.example.com", "tempo2.example.com"]
+		}
+	}
+	routing_key = ""
+}
+
+otelcol.exporter.logging "_0_default" { }
+
+otelcol.receiver.otlp "_1_lb" {
+	grpc {
+		endpoint = "0.0.0.0:4318"
+	}
+
+	output {
+		metrics = []
+		logs    = []
+		traces  = [otelcol.processor.tail_sampling._1_default.input]
 	}
 }
 
-otelcol.processor.tail_sampling "default" {
+otelcol.processor.tail_sampling "_1_default" {
 	policy {
 		name = "test-policy-1"
 		type = "always_sample"
@@ -84,18 +135,34 @@
 	decision_wait = "5s"
 
 	output {
-		traces = [otelcol.exporter.otlp.default_0.input, otelcol.exporter.logging.default.input]
+		traces = [otelcol.processor.batch._1_default.input]
 	}
 }
 
-otelcol.exporter.otlp "default_0" {
+otelcol.processor.batch "_1_default" {
+	timeout             = "5s"
+	send_batch_size     = 2048
+	send_batch_max_size = 4096
+
+	output {
+		metrics = []
+		logs    = []
+		traces  = [otelcol.exporter.otlp._1_0.input, otelcol.exporter.logging._1_default.input]
+	}
+}
+
+otelcol.exporter.otlp "_1_0" {
 	retry_on_failure {
 		max_elapsed_time = "1m0s"
 	}
 
 	client {
-		endpoint = "http://localhost:1234/write"
+		endpoint = "tempo.example.com:14250"
+
+		tls {
+			insecure = true
+		}
 	}
 }
 
-otelcol.exporter.logging "default" { }
+otelcol.exporter.logging "_1_default" { }
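What changed in the expected River output: the single static `trace_config` now converts into two chained pipeline instances, `_0_*` (OTLP receiver → discovery → attributes → `otelcol.exporter.loadbalancing`) and `_1_*` (the load balancer's internal consumer: OTLP receiver on `0.0.0.0:4318` → tail sampling → batch → OTLP exporter), which mirrors how static mode runs `load_balancing` as a second collector pipeline. A hedged sketch of the label scheme as it appears in this testdata; `flowLabel` is illustrative, not the converter's actual helper:

```go
package main

import "fmt"

// flowLabel reproduces the naming visible in the expected output: each Flow
// component label is prefixed with the index of the collector pipeline it
// belongs to, so the two instances of one static config cannot collide.
func flowLabel(pipelineIndex int, name string) string {
	return fmt.Sprintf("_%d_%s", pipelineIndex, name)
}

func main() {
	fmt.Println(flowLabel(0, "default"))             // _0_default: the user-facing pipeline
	fmt.Println(flowLabel(1, "lb"))                  // _1_lb: the load balancer's internal receiver
	fmt.Println(flowLabel(0, "default_prometheus1")) // _0_default_prometheus1: discovery from scrape_configs
}
```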
diff --git a/internal/converter/internal/staticconvert/testdata/traces.yaml b/internal/converter/internal/staticconvert/testdata/traces.yaml
index 2553da25200a..57e1877a8f72 100644
--- a/internal/converter/internal/staticconvert/testdata/traces.yaml
+++ b/internal/converter/internal/staticconvert/testdata/traces.yaml
@@ -1,15 +1,24 @@
 traces:
   configs:
   - name: trace_config
+    attributes:
+      actions:
+      - key: db.table
+        action: delete
+    batch:
+      timeout: 5s
+      send_batch_size: 2048
+      send_batch_max_size: 4096
+    remote_write:
+      - endpoint: tempo.example.com:14250
+        insecure: true
+    automatic_logging:
+      backend: "stdout"
     receivers:
       otlp:
         protocols:
           grpc:
           http:
-    remote_write:
-      - endpoint: http://localhost:1234/write
-    automatic_logging:
-      backend: "stdout"
     scrape_configs:
       - job_name: "prometheus1"
         azure_sd_configs:
@@ -27,6 +36,13 @@
             target_label: __param_target1
           - source_labels: [__address2__]
             target_label: __param_target2
+    prom_sd_operation_type: "update"
+    prom_sd_pod_associations:
+      - ip
+      - net.host.ip
+    # spanmetrics:
+    #   namespace: testing
+    #   metrics_instance: default
     tail_sampling:
       policies:
         [
@@ -35,7 +51,23 @@
             type: always_sample
           },
         ]
-    attributes:
-      actions:
-      - key: db.table
-        action: delete
\ No newline at end of file
+    load_balancing:
+      resolver:
+        static:
+          hostnames:
+            - tempo1.example.com
+            - tempo2.example.com
+    # service_graphs:
+    #   enabled: true
+    jaeger_remote_sampling:
+      - source:
+          reload_interval: 30s
+          remote:
+            endpoint: jaeger-collector:14250
+
+# This metrics config is needed when we enable spanmetrics for traces
+#
+# metrics:
+#   global:
+#     remote_write:
+#       - url: http://localhost:9009/api/prom/push
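For reviewers new to this testdata layout: each `*.yaml` file is a static-mode input and the sibling `*.river` file is the golden Flow output the converter must reproduce; the commented `spanmetrics` and `service_graphs` blocks and the trailing `metrics` stanza are deliberately inert, since this test does not exercise those features. A minimal sketch of that golden-file pattern, with `convertStatic` as a hypothetical stand-in for the repository's actual converter entry point:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// convertStatic is a hypothetical placeholder for the static-to-Flow
// converter; the real one lives under internal/converter/internal/staticconvert.
func convertStatic(in []byte) ([]byte, error) {
	return in, nil // placeholder: echo the input
}

func main() {
	yamlFiles, err := filepath.Glob("testdata/*.yaml")
	if err != nil {
		panic(err)
	}
	for _, yamlFile := range yamlFiles {
		in, err := os.ReadFile(yamlFile)
		if err != nil {
			panic(err)
		}
		got, err := convertStatic(in)
		if err != nil {
			fmt.Printf("%s: convert failed: %v\n", yamlFile, err)
			continue
		}
		// Compare against the sibling golden file, e.g. traces.yaml -> traces.river.
		want, err := os.ReadFile(strings.TrimSuffix(yamlFile, ".yaml") + ".river")
		if err != nil {
			panic(err)
		}
		if !bytes.Equal(got, want) {
			fmt.Printf("%s: output differs from golden .river file\n", yamlFile)
		}
	}
}
```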