Skip to content

Commit

Permalink
wire up static traces converter test for all major scenarios and squash a bug along the way
Browse files Browse the repository at this point in the history

Signed-off-by: erikbaranowski <[email protected]>
  • Loading branch information
erikbaranowski committed Mar 29, 2024
1 parent 88780b8 commit 06a6e62
Show file tree
Hide file tree
Showing 3 changed files with 147 additions and 36 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -75,13 +75,19 @@ func removeReceiver(otelCfg *otelcol.Config, pipelineType otel_component.Type, r
}

delete(otelCfg.Receivers, otel_component.NewID(receiverType))
spr := make([]otel_component.ID, 0, len(otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers)-1)
for _, r := range otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers {
if r != otel_component.NewID(receiverType) {
spr = append(spr, r)
for ix, p := range otelCfg.Service.Pipelines {
if ix.Type() != pipelineType {
continue
}

spr := make([]otel_component.ID, 0)
for _, r := range p.Receivers {
if r.Type() != receiverType {
spr = append(spr, r)
}
}
otelCfg.Service.Pipelines[ix].Receivers = spr
}
otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Receivers = spr
}

// removeProcessor removes a processor from the otel config for a specific pipeline type.
Expand All @@ -91,11 +97,17 @@ func removeProcessor(otelCfg *otelcol.Config, pipelineType otel_component.Type,
}

delete(otelCfg.Processors, otel_component.NewID(processorType))
spr := make([]otel_component.ID, 0, len(otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors)-1)
for _, r := range otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors {
if r != otel_component.NewID(processorType) {
spr = append(spr, r)
for ix, p := range otelCfg.Service.Pipelines {
if ix.Type() != pipelineType {
continue
}

spr := make([]otel_component.ID, 0)
for _, r := range p.Processors {
if r.Type() != processorType {
spr = append(spr, r)
}
}
otelCfg.Service.Pipelines[ix].Processors = spr
}
otelCfg.Service.Pipelines[otel_component.NewID(pipelineType)].Processors = spr
}
103 changes: 85 additions & 18 deletions internal/converter/internal/staticconvert/testdata/traces.river
Original file line number Diff line number Diff line change
@@ -1,4 +1,19 @@
otelcol.receiver.otlp "default" {
otelcol.extension.jaeger_remote_sampling "default_0" {
grpc { }

http { }

source {
remote {
endpoint = "jaeger-collector:14250"
compression = ""
write_buffer_size = "0B"
}
reload_interval = "30s"
}
}

otelcol.receiver.otlp "_0_default" {
grpc {
include_metadata = true
}
Expand All @@ -10,11 +25,11 @@ otelcol.receiver.otlp "default" {
output {
metrics = []
logs = []
traces = [otelcol.processor.discovery.default.input]
traces = [otelcol.processor.discovery._0_default.input]
}
}

discovery.azure "default_prometheus1" {
discovery.azure "_0_default_prometheus1" {
subscription_id = "subscription1"

oauth {
Expand All @@ -28,17 +43,17 @@ discovery.azure "default_prometheus1" {
}
}

discovery.lightsail "default_prometheus1" {
discovery.lightsail "_0_default_prometheus1" {
region = "us-east-1"
access_key = "YOUR_ACCESS_KEY"
secret_key = "YOUR_SECRET_KEY"
port = 8080
}

discovery.relabel "default_prometheus1" {
discovery.relabel "_0_default_prometheus1" {
targets = concat(
discovery.azure.default_prometheus1.targets,
discovery.lightsail.default_prometheus1.targets,
discovery.azure._0_default_prometheus1.targets,
discovery.lightsail._0_default_prometheus1.targets,
)

rule {
Expand All @@ -52,18 +67,19 @@ discovery.relabel "default_prometheus1" {
}
}

otelcol.processor.discovery "default" {
targets = discovery.relabel.default_prometheus1.output
pod_associations = []
otelcol.processor.discovery "_0_default" {
targets = discovery.relabel._0_default_prometheus1.output
operation_type = "update"
pod_associations = ["ip", "net.host.ip"]

output {
metrics = []
logs = []
traces = [otelcol.processor.attributes.default.input]
traces = [otelcol.processor.attributes._0_default.input]
}
}

otelcol.processor.attributes "default" {
otelcol.processor.attributes "_0_default" {
action {
key = "db.table"
action = "delete"
Expand All @@ -72,30 +88,81 @@ otelcol.processor.attributes "default" {
output {
metrics = []
logs = []
traces = [otelcol.processor.tail_sampling.default.input]
traces = [otelcol.exporter.loadbalancing._0_default.input, otelcol.exporter.logging._0_default.input]
}
}

otelcol.exporter.loadbalancing "_0_default" {
protocol {
otlp {
retry {
max_elapsed_time = "1m0s"
}

client {
compression = "none"
}
}
}

resolver {
static {
hostnames = ["tempo1.example.com", "tempo2.example.com"]
}
}
routing_key = ""
}

otelcol.exporter.logging "_0_default" { }

otelcol.receiver.otlp "_1_lb" {
grpc {
endpoint = "0.0.0.0:4318"
}

output {
metrics = []
logs = []
traces = [otelcol.processor.tail_sampling._1_default.input]
}
}

otelcol.processor.tail_sampling "default" {
otelcol.processor.tail_sampling "_1_default" {
policy {
name = "test-policy-1"
type = "always_sample"
}
decision_wait = "5s"

output {
traces = [otelcol.exporter.otlp.default_0.input, otelcol.exporter.logging.default.input]
traces = [otelcol.processor.batch._1_default.input]
}
}

otelcol.exporter.otlp "default_0" {
otelcol.processor.batch "_1_default" {
timeout = "5s"
send_batch_size = 2048
send_batch_max_size = 4096

output {
metrics = []
logs = []
traces = [otelcol.exporter.otlp._1_0.input, otelcol.exporter.logging._1_default.input]
}
}

otelcol.exporter.otlp "_1_0" {
retry_on_failure {
max_elapsed_time = "1m0s"
}

client {
endpoint = "http://localhost:1234/write"
endpoint = "tempo.example.com:14250"

tls {
insecure = true
}
}
}

otelcol.exporter.logging "default" { }
otelcol.exporter.logging "_1_default" { }
48 changes: 40 additions & 8 deletions internal/converter/internal/staticconvert/testdata/traces.yaml
Original file line number Diff line number Diff line change
@@ -1,15 +1,24 @@
traces:
configs:
- name: trace_config
attributes:
actions:
- key: db.table
action: delete
batch:
timeout: 5s
send_batch_size: 2048
send_batch_max_size: 4096
remote_write:
- endpoint: tempo.example.com:14250
insecure: true
automatic_logging:
backend: "stdout"
receivers:
otlp:
protocols:
grpc:
http:
remote_write:
- endpoint: http://localhost:1234/write
automatic_logging:
backend: "stdout"
scrape_configs:
- job_name: "prometheus1"
azure_sd_configs:
Expand All @@ -27,6 +36,13 @@ traces:
target_label: __param_target1
- source_labels: [__address2__]
target_label: __param_target2
prom_sd_operation_type: "update"
prom_sd_pod_associations:
- ip
- net.host.ip
# spanmetrics:
# namespace: testing
# metrics_instance: default
tail_sampling:
policies:
[
Expand All @@ -35,7 +51,23 @@ traces:
type: always_sample
},
]
attributes:
actions:
- key: db.table
action: delete
load_balancing:
resolver:
static:
hostnames:
- tempo1.example.com
- tempo2.example.com
# service_graphs:
# enabled: true
jaeger_remote_sampling:
- source:
reload_interval: 30s
remote:
endpoint: jaeger-collector:14250

# This metrics config is needed when we enable spanmetrics for traces
#
# metrics:
# global:
# remote_write:
# - url: http://localhost:9009/api/prom/push

0 comments on commit 06a6e62

Please sign in to comment.