From c1793d12ef9a2bdcfcb6623187b35c12c2ace016 Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Tue, 12 Mar 2024 08:47:05 -0400 Subject: [PATCH 01/12] implement otel to flow converter for attributes processor (#6652) --- .../converter_attributesprocessor.go | 85 +++++++++++++++++++ .../otelcolconvert/testdata/attributes.river | 64 ++++++++++++++ .../otelcolconvert/testdata/attributes.yaml | 51 +++++++++++ 3 files changed, 200 insertions(+) create mode 100644 internal/converter/internal/otelcolconvert/converter_attributesprocessor.go create mode 100644 internal/converter/internal/otelcolconvert/testdata/attributes.river create mode 100644 internal/converter/internal/otelcolconvert/testdata/attributes.yaml diff --git a/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go new file mode 100644 index 0000000000..c9b9486b26 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_attributesprocessor.go @@ -0,0 +1,85 @@ +package otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/processor/attributes" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, attributesProcessorConverter{}) +} + +type attributesProcessorConverter struct{} + +func (attributesProcessorConverter) Factory() component.Factory { + return attributesprocessor.NewFactory() +} + +func (attributesProcessorConverter) InputComponentName() string { + return "otelcol.processor.attributes" +} + +func (attributesProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg 
component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toAttributesProcessor(state, id, cfg.(*attributesprocessor.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "processor", "attributes"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toAttributesProcessor(state *state, id component.InstanceID, cfg *attributesprocessor.Config) *attributes.Arguments { + var ( + nextMetrics = state.Next(id, component.DataTypeMetrics) + nextTraces = state.Next(id, component.DataTypeTraces) + nextLogs = state.Next(id, component.DataTypeLogs) + ) + + return &attributes.Arguments{ + Match: toMatchConfig(cfg), + Actions: toAttrActionKeyValue(encodeMapslice(cfg.Actions)), + Output: &otelcol.ConsumerArguments{ + Metrics: toTokenizedConsumers(nextMetrics), + Logs: toTokenizedConsumers(nextLogs), + Traces: toTokenizedConsumers(nextTraces)}, + } +} + +func toMatchConfig(cfg *attributesprocessor.Config) otelcol.MatchConfig { + return otelcol.MatchConfig{ + Include: toMatchProperties(encodeMapstruct(cfg.Include)), + Exclude: toMatchProperties(encodeMapstruct(cfg.Exclude)), + } +} + +func toAttrActionKeyValue(cfg []map[string]any) []otelcol.AttrActionKeyValue { + result := make([]otelcol.AttrActionKeyValue, 0) + + for _, action := range cfg { + result = append(result, otelcol.AttrActionKeyValue{ + Key: action["key"].(string), + Value: action["value"], + RegexPattern: action["pattern"].(string), + FromAttribute: action["from_attribute"].(string), + FromContext: action["from_context"].(string), + ConvertedType: action["converted_type"].(string), + Action: encodeString(action["action"]), + }) + } + + return result +} diff --git a/internal/converter/internal/otelcolconvert/testdata/attributes.river 
b/internal/converter/internal/otelcolconvert/testdata/attributes.river new file mode 100644 index 0000000000..493640814c --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/attributes.river @@ -0,0 +1,64 @@ +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [otelcol.processor.attributes.default_example.input] + logs = [otelcol.processor.attributes.default_example.input] + traces = [otelcol.processor.attributes.default_example.input] + } +} + +otelcol.processor.attributes "default_example" { + action { + key = "db.table" + action = "delete" + } + + action { + key = "redacted_span" + value = true + action = "upsert" + } + + action { + key = "copy_key" + from_attribute = "key_original" + action = "update" + } + + action { + key = "account_id" + value = 2245 + action = "insert" + } + + action { + key = "account_password" + action = "delete" + } + + action { + key = "account_email" + action = "hash" + } + + action { + key = "http.status_code" + converted_type = "int" + action = "convert" + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/attributes.yaml b/internal/converter/internal/otelcolconvert/testdata/attributes.yaml new file mode 100644 index 0000000000..dc9cfcd6e7 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/attributes.yaml @@ -0,0 +1,51 @@ +receivers: + otlp: + protocols: + grpc: + http: + +exporters: + otlp: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). 
+ endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +processors: + attributes/example: + actions: + - key: db.table + action: delete + - key: redacted_span + value: true + action: upsert + - key: copy_key + from_attribute: key_original + action: update + - key: account_id + value: 2245 + action: insert + - key: account_password + action: delete + - key: account_email + action: hash + - key: http.status_code + action: convert + converted_type: int + +service: + pipelines: + metrics: + receivers: [otlp] + processors: [attributes/example] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [attributes/example] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [attributes/example] + exporters: [otlp] From 4dd7eea8166913d3470991cb6b17795dee62efa0 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 12 Mar 2024 15:20:13 +0200 Subject: [PATCH 02/12] docs: reference Grafana Agent Flow as an OTel Collector distribution (#6633) Signed-off-by: Paschalis Tsilias Co-authored-by: Clayton Cornell <131809008+clayton-cornell@users.noreply.github.com> --- README.md | 7 +++---- docs/sources/_index.md | 7 +++---- docs/sources/_index.md.t | 7 +++---- docs/sources/flow/_index.md | 19 ++++++++++++++++++- 4 files changed, 27 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 6e0ee6e5c1..6e4ae73645 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,8 @@

Grafana Agent logo

-Grafana Agent is a vendor-neutral, batteries-included telemetry collector with -configuration inspired by [Terraform][]. It is designed to be flexible, -performant, and compatible with multiple ecosystems such as Prometheus and -OpenTelemetry. +Grafana Agent is an OpenTelemetry Collector distribution with configuration +inspired by [Terraform][]. It is designed to be flexible, performant, and +compatible with multiple ecosystems such as Prometheus and OpenTelemetry. Grafana Agent is based around **components**. Components are wired together to form programmable observability **pipelines** for telemetry collection, diff --git a/docs/sources/_index.md b/docs/sources/_index.md index bb360bc949..76c7876f50 100644 --- a/docs/sources/_index.md +++ b/docs/sources/_index.md @@ -15,10 +15,9 @@ cascade: # Grafana Agent -Grafana Agent is a vendor-neutral, batteries-included telemetry collector with -configuration inspired by [Terraform][]. It is designed to be flexible, -performant, and compatible with multiple ecosystems such as Prometheus and -OpenTelemetry. +Grafana Agent is an OpenTelemetry Collector distribution with configuration +inspired by [Terraform][]. It is designed to be flexible, performant, and +compatible with multiple ecosystems such as Prometheus and OpenTelemetry. Grafana Agent is based around **components**. Components are wired together to form programmable observability **pipelines** for telemetry collection, diff --git a/docs/sources/_index.md.t b/docs/sources/_index.md.t index ab09e34a8e..f4c9bfc9eb 100644 --- a/docs/sources/_index.md.t +++ b/docs/sources/_index.md.t @@ -15,10 +15,9 @@ cascade: # Grafana Agent -Grafana Agent is a vendor-neutral, batteries-included telemetry collector with -configuration inspired by [Terraform][]. It is designed to be flexible, -performant, and compatible with multiple ecosystems such as Prometheus and -OpenTelemetry. 
+Grafana Agent is an OpenTelemetry Collector distribution with configuration +inspired by [Terraform][]. It is designed to be flexible, performant, and +compatible with multiple ecosystems such as Prometheus and OpenTelemetry. Grafana Agent is based around **components**. Components are wired together to form programmable observability **pipelines** for telemetry collection, diff --git a/docs/sources/flow/_index.md b/docs/sources/flow/_index.md index 1840476a07..947c898ecc 100644 --- a/docs/sources/flow/_index.md +++ b/docs/sources/flow/_index.md @@ -33,6 +33,19 @@ Components allow for reusability, composability, and focus on a single task. * Use expressions to bind components together to build a programmable pipeline. * Includes a UI for debugging the state of a pipeline. +{{< param "PRODUCT_NAME" >}} is a [distribution][] of the OpenTelemetry +Collector. + +Each distribution offers a different collection of components and capabilities. +As a distribution, {{< param "PRODUCT_NAME" >}} includes dozens of +OpenTelemetry-native [components][] from the OpenTelemetry project and +introduces new features such as programmable pipelines, clustering support, +and the ability to share pipelines around the world. + +In addition to being an OpenTelemetry Collector distribution, +{{< param "PRODUCT_NAME" >}} also includes first-class support for the +Prometheus and Loki ecosystems, allowing you to mix-and-match your pipelines. + ## Example ```river @@ -84,6 +97,8 @@ This feature is experimental, and it doesn't support all River components. * Consult the [Tasks][] instructions to accomplish common objectives with {{< param "PRODUCT_NAME" >}}. * Check out the [Reference][] documentation to find specific information you might be looking for. 
+[distribution]: https://opentelemetry.io/ecosystem/distributions/ + {{% docs/reference %}} [Install]: "/docs/agent/ -> /docs/agent//flow/get-started/install/" [Install]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/get-started/install/" @@ -94,5 +109,7 @@ This feature is experimental, and it doesn't support all River components. [Tutorials]: "/docs/agent/ -> /docs/agent//flow/tutorials/" [Tutorials]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/tutorials/ [Reference]: "/docs/agent/ -> /docs/agent//flow/reference/" -[Reference]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/ +[Reference]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/" +[components]: "/docs/agent/ -> /docs/agent//flow/reference/" +[components]: "/docs/grafana-cloud/ -> /docs/grafana-cloud/send-data/agent/flow/reference/components" {{% /docs/reference %}} From a28e15941912074a0a9bf24a8d611d14d3fb5dd7 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 12 Mar 2024 16:23:55 +0200 Subject: [PATCH 03/12] remotecfg: fix race condition in test (#6659) Signed-off-by: Paschalis Tsilias --- internal/service/remotecfg/remotecfg_test.go | 23 +++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/internal/service/remotecfg/remotecfg_test.go b/internal/service/remotecfg/remotecfg_test.go index a370419369..45e1befc73 100644 --- a/internal/service/remotecfg/remotecfg_test.go +++ b/internal/service/remotecfg/remotecfg_test.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "sync" "testing" "time" @@ -76,7 +77,9 @@ func TestAPIResponse(t *testing.T) { env.svc.asClient = client // Mock client to return a valid response. + client.mut.Lock() client.getConfigFunc = buildGetConfigHandler(cfg1) + client.mut.Unlock() // Run the service. go func() { @@ -90,9 +93,9 @@ func TestAPIResponse(t *testing.T) { }, time.Second, 10*time.Millisecond) // Update the response returned by the API. 
- env.svc.mut.Lock() + client.mut.Lock() client.getConfigFunc = buildGetConfigHandler(cfg2) - env.svc.mut.Unlock() + client.mut.Unlock() // Verify that the service has loaded the updated response. require.EventuallyWithT(t, func(c *assert.CollectT) { @@ -177,29 +180,33 @@ func (f fakeHost) NewController(id string) service.Controller { } type agentClient struct { + mut sync.RWMutex getConfigFunc func(context.Context, *connect.Request[agentv1.GetConfigRequest]) (*connect.Response[agentv1.GetConfigResponse], error) } -func (ag agentClient) GetConfig(ctx context.Context, req *connect.Request[agentv1.GetConfigRequest]) (*connect.Response[agentv1.GetConfigResponse], error) { +func (ag *agentClient) GetConfig(ctx context.Context, req *connect.Request[agentv1.GetConfigRequest]) (*connect.Response[agentv1.GetConfigResponse], error) { + ag.mut.RLock() + defer ag.mut.RUnlock() + if ag.getConfigFunc != nil { return ag.getConfigFunc(ctx, req) } panic("getConfigFunc not set") } -func (ag agentClient) GetAgent(context.Context, *connect.Request[agentv1.GetAgentRequest]) (*connect.Response[agentv1.Agent], error) { +func (ag *agentClient) GetAgent(context.Context, *connect.Request[agentv1.GetAgentRequest]) (*connect.Response[agentv1.Agent], error) { return nil, nil } -func (ag agentClient) CreateAgent(context.Context, *connect.Request[agentv1.CreateAgentRequest]) (*connect.Response[agentv1.Agent], error) { +func (ag *agentClient) CreateAgent(context.Context, *connect.Request[agentv1.CreateAgentRequest]) (*connect.Response[agentv1.Agent], error) { return nil, nil } -func (ag agentClient) UpdateAgent(context.Context, *connect.Request[agentv1.UpdateAgentRequest]) (*connect.Response[agentv1.Agent], error) { +func (ag *agentClient) UpdateAgent(context.Context, *connect.Request[agentv1.UpdateAgentRequest]) (*connect.Response[agentv1.Agent], error) { return nil, nil } -func (ag agentClient) DeleteAgent(context.Context, *connect.Request[agentv1.DeleteAgentRequest]) 
(*connect.Response[agentv1.DeleteAgentResponse], error) { +func (ag *agentClient) DeleteAgent(context.Context, *connect.Request[agentv1.DeleteAgentRequest]) (*connect.Response[agentv1.DeleteAgentResponse], error) { return nil, nil } -func (ag agentClient) ListAgents(context.Context, *connect.Request[agentv1.ListAgentsRequest]) (*connect.Response[agentv1.Agents], error) { +func (ag *agentClient) ListAgents(context.Context, *connect.Request[agentv1.ListAgentsRequest]) (*connect.Response[agentv1.Agents], error) { return nil, nil } From 7c3372a44baf155c49c8211ad842a2c06c33b3f5 Mon Sep 17 00:00:00 2001 From: Piotr <17101802+thampiotr@users.noreply.github.com> Date: Tue, 12 Mar 2024 14:37:11 +0000 Subject: [PATCH 04/12] Upgrade Go to 1.22.1 (#6646) * Upgrade Go to 1.22.1 * update build images --- .drone/drone.yml | 118 +++++++++--------- CHANGELOG.md | 2 + build-image/Dockerfile | 2 +- build-image/windows/Dockerfile | 2 +- cmd/grafana-agent-operator/Dockerfile | 2 +- cmd/grafana-agent/Dockerfile | 2 +- cmd/grafana-agent/Dockerfile.windows | 2 +- cmd/grafana-agentctl/Dockerfile | 2 +- cmd/grafana-agentctl/Dockerfile.windows | 2 +- docs/Makefile | 6 +- go.mod | 2 +- .../configs/otel-metrics-gen/Dockerfile | 2 +- .../configs/prom-gen/Dockerfile | 2 +- tools/make/build-container.mk | 2 +- 14 files changed, 75 insertions(+), 73 deletions(-) diff --git a/.drone/drone.yml b/.drone/drone.yml index 7630a9fd05..9a9bd6b309 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -110,7 +110,7 @@ steps: - commands: - apt-get update -y && apt-get install -y libsystemd-dev - make lint - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Lint trigger: event: @@ -128,7 +128,7 @@ steps: - ERR_MSG="Dashboard definitions are out of date. Please run 'make generate-dashboards' and commit changes!" - if [ ! 
-z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Regenerate dashboards trigger: event: @@ -146,7 +146,7 @@ steps: - ERR_MSG="Custom Resource Definitions are out of date. Please run 'make generate-crds' and commit changes!" - if [ ! -z "$(git status --porcelain)" ]; then echo $ERR_MSG >&2; exit 1; fi - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Regenerate crds trigger: event: @@ -161,7 +161,7 @@ platform: steps: - commands: - make GO_TAGS="nodocker" test - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Run Go tests trigger: event: @@ -176,7 +176,7 @@ platform: steps: - commands: - K8S_USE_DOCKER_NETWORK=1 make test - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Run Go tests volumes: - name: docker @@ -199,7 +199,7 @@ platform: steps: - commands: - go test -tags="nodocker,nonetwork" ./... 
- image: grafana/agent-build-image:0.33.0-windows + image: grafana/agent-build-image:0.40.2-windows name: Run Go tests trigger: ref: @@ -214,7 +214,7 @@ platform: steps: - commands: - make agent-image - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build container volumes: - name: docker @@ -239,7 +239,7 @@ platform: steps: - commands: - make agentctl-image - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build container volumes: - name: docker @@ -264,7 +264,7 @@ platform: steps: - commands: - make operator-image - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build container volumes: - name: docker @@ -290,7 +290,7 @@ platform: steps: - commands: - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agent' - image: grafana/agent-build-image:0.33.0-windows + image: grafana/agent-build-image:0.40.2-windows name: Build container volumes: - name: docker @@ -316,7 +316,7 @@ platform: steps: - commands: - '& "C:/Program Files/git/bin/bash.exe" ./tools/ci/docker-containers-windows agentctl' - image: grafana/agent-build-image:0.33.0-windows + image: grafana/agent-build-image:0.40.2-windows name: Build container volumes: - name: docker @@ -343,7 +343,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -360,7 +360,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -377,7 +377,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: 
grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -394,7 +394,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -410,7 +410,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -426,7 +426,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -442,7 +442,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -458,7 +458,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -475,7 +475,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -492,7 +492,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -509,7 +509,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: 
grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -526,7 +526,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -542,7 +542,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -558,7 +558,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -574,7 +574,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -590,7 +590,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agent-flow - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -607,7 +607,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -624,7 +624,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -641,7 +641,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: 
grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -658,7 +658,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -674,7 +674,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -690,7 +690,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -706,7 +706,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -722,7 +722,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make agentctl - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -739,7 +739,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -756,7 +756,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -773,7 +773,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=ppc64le GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: 
grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -790,7 +790,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=s390x GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -806,7 +806,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -822,7 +822,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=darwin GOARCH=arm64 GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -838,7 +838,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -854,7 +854,7 @@ steps: - commands: - make generate-ui - GO_TAGS="builtinassets" GOOS=freebsd GOARCH=amd64 GOARM= make operator - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -871,7 +871,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=amd64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -888,7 +888,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets promtail_journal_enabled" GOOS=linux GOARCH=arm64 GOARM= GOEXPERIMENT=boringcrypto make agent-boringcrypto - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Build trigger: event: @@ -905,7 +905,7 @@ steps: - make generate-ui - GO_TAGS="builtinassets" GOOS=windows GOARCH=amd64 GOARM= GOEXPERIMENT=cngcrypto make 
agent-flow-windows-boringcrypto - image: grafana/agent-build-image:0.33.0-boringcrypto + image: grafana/agent-build-image:0.40.2-boringcrypto name: Build trigger: event: @@ -921,7 +921,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Configure QEMU volumes: - name: docker @@ -941,7 +941,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Publish container volumes: - name: docker @@ -965,7 +965,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Configure QEMU volumes: - name: docker @@ -985,7 +985,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Publish container volumes: - name: docker @@ -1009,7 +1009,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Configure QEMU volumes: - name: docker @@ -1029,7 +1029,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Publish container volumes: - name: docker @@ -1053,7 +1053,7 @@ steps: - commands: - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes failure: ignore - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Configure QEMU volumes: - name: docker @@ -1073,7 +1073,7 @@ steps: from_secret: docker_password GCR_CREDS: from_secret: gcr_admin - image: 
grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Publish container volumes: - name: docker @@ -1102,7 +1102,7 @@ steps: from_secret: docker_login DOCKER_PASSWORD: from_secret: docker_password - image: grafana/agent-build-image:0.33.0-windows + image: grafana/agent-build-image:0.40.2-windows name: Build containers volumes: - name: docker @@ -1131,7 +1131,7 @@ steps: from_secret: docker_login DOCKER_PASSWORD: from_secret: docker_password - image: grafana/agent-build-image:0.33.0-windows + image: grafana/agent-build-image:0.40.2-windows name: Build containers volumes: - name: docker @@ -1248,7 +1248,7 @@ steps: from_secret: gpg_private_key GPG_PUBLIC_KEY: from_secret: gpg_public_key - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Publish release volumes: - name: docker @@ -1273,7 +1273,7 @@ steps: - DOCKER_OPTS="" make dist/grafana-agentctl-linux-amd64 - DOCKER_OPTS="" make dist.temp/grafana-agent-flow-linux-amd64 - DOCKER_OPTS="" make test-packages - image: grafana/agent-build-image:0.33.0 + image: grafana/agent-build-image:0.40.2 name: Test Linux system packages volumes: - name: docker @@ -1369,6 +1369,6 @@ kind: secret name: updater_private_key --- kind: signature -hmac: e77dab92983fe06b754e48fdad95fdb4cdb0a82e3daa23f841917d8ca98fb0ad +hmac: c94699f336cc58987f7c3ccd8660110c6366ea596831b24116ff41cc9987cfe9 ... diff --git a/CHANGELOG.md b/CHANGELOG.md index 223d3eae7b..a3db59532c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,8 @@ Main (unreleased) - Clustering for Grafana Agent in Flow mode has graduated from beta to stable. +- Upgrade to Go 1.22.1 (@thampiotr) + v0.40.2 (2024-03-05) -------------------- diff --git a/build-image/Dockerfile b/build-image/Dockerfile index 05125a2632..4e8ae49b39 100644 --- a/build-image/Dockerfile +++ b/build-image/Dockerfile @@ -4,7 +4,7 @@ # default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set # in environment variables. 
-# NOTE: The GO_RUNTIME is used to switch between the default google go runtime and mcr.microsoft.com/oss/go/microsoft/golang:1.22.1-bullseye which is a microsoft +# NOTE: The GO_RUNTIME is used to switch between the default Google go runtime and mcr.microsoft.com/oss/go/microsoft/golang:1.22.1-bullseye which is a Microsoft # fork of go that allows using windows crypto instead of boring crypto. Details at https://github.com/microsoft/go/tree/microsoft/main/eng/doc/fips ARG GO_RUNTIME=mustoverride diff --git a/build-image/windows/Dockerfile b/build-image/windows/Dockerfile index ddd3448e2c..3827b073f9 100644 --- a/build-image/windows/Dockerfile +++ b/build-image/windows/Dockerfile @@ -1,4 +1,4 @@ -FROM library/golang:1.22.0-windowsservercore-1809 +FROM library/golang:1.22.1-windowsservercore-1809 SHELL ["powershell", "-command"] diff --git a/cmd/grafana-agent-operator/Dockerfile b/cmd/grafana-agent-operator/Dockerfile index 6ba2a6218c..7e4f98f092 100644 --- a/cmd/grafana-agent-operator/Dockerfile +++ b/cmd/grafana-agent-operator/Dockerfile @@ -4,7 +4,7 @@ # default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set # in environment variables. -FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.33.0 as build +FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.40.2 as build ARG BUILDPLATFORM ARG TARGETPLATFORM ARG TARGETOS diff --git a/cmd/grafana-agent/Dockerfile b/cmd/grafana-agent/Dockerfile index 4c2a69dcb4..09b38ea7d6 100644 --- a/cmd/grafana-agent/Dockerfile +++ b/cmd/grafana-agent/Dockerfile @@ -4,7 +4,7 @@ # default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set # in environment variables. 
-FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.33.0 as build +FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.40.2 as build ARG BUILDPLATFORM ARG TARGETPLATFORM ARG TARGETOS diff --git a/cmd/grafana-agent/Dockerfile.windows b/cmd/grafana-agent/Dockerfile.windows index 6f99a6138f..7ace535312 100644 --- a/cmd/grafana-agent/Dockerfile.windows +++ b/cmd/grafana-agent/Dockerfile.windows @@ -1,4 +1,4 @@ -FROM grafana/agent-build-image:0.33.0-windows as builder +FROM grafana/agent-build-image:0.40.2-windows as builder ARG VERSION ARG RELEASE_BUILD=1 diff --git a/cmd/grafana-agentctl/Dockerfile b/cmd/grafana-agentctl/Dockerfile index 67fe64c6f3..f43179d418 100644 --- a/cmd/grafana-agentctl/Dockerfile +++ b/cmd/grafana-agentctl/Dockerfile @@ -4,7 +4,7 @@ # default when running `docker buildx build` or when DOCKER_BUILDKIT=1 is set # in environment variables. -FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.33.0 as build +FROM --platform=$BUILDPLATFORM grafana/agent-build-image:0.40.2 as build ARG BUILDPLATFORM ARG TARGETPLATFORM ARG TARGETOS diff --git a/cmd/grafana-agentctl/Dockerfile.windows b/cmd/grafana-agentctl/Dockerfile.windows index 8cf3c34a0a..7e4e502d30 100644 --- a/cmd/grafana-agentctl/Dockerfile.windows +++ b/cmd/grafana-agentctl/Dockerfile.windows @@ -1,4 +1,4 @@ -FROM grafana/agent-build-image:0.33.0-windows as builder +FROM grafana/agent-build-image:0.40.2-windows as builder ARG VERSION ARG RELEASE_BUILD=1 diff --git a/docs/Makefile b/docs/Makefile index ea9ddf0df3..e015240cc7 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -11,11 +11,11 @@ include docs.mk docs: check-cloudwatch-integration check-cloudwatch-integration: - $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md - $(PODMAN) run -v $(shell git rev-parse 
--show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md + $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.22.1-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/static/configuration/integrations/cloudwatch-exporter-config.md + $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.22.1-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go check /docs/sources/flow/reference/components/prometheus.exporter.cloudwatch.md generate-cloudwatch-integration: - $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.21-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go generate + $(PODMAN) run -v $(shell git rev-parse --show-toplevel):/repo -v $(shell pwd):/docs -w /repo golang:1.22.1-bullseye go run internal/static/integrations/cloudwatch_exporter/docs/doc.go generate sources/assets/hierarchy.svg: sources/operator/hierarchy.dot cat $< | $(PODMAN) run --rm -i nshine/dot dot -Tsvg > $@ diff --git a/go.mod b/go.mod index 47613a8f9c..4eb7edbf00 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/grafana/agent -go 1.21.0 +go 1.22.1 require ( cloud.google.com/go/pubsub v1.33.0 diff --git a/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile b/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile index 0270edbd0b..79cb37ca39 100644 --- a/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile +++ b/internal/cmd/integration-tests/configs/otel-metrics-gen/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21 as build +FROM golang:1.22.1 as build WORKDIR /app/ COPY go.mod go.sum ./ RUN go mod download diff --git 
a/internal/cmd/integration-tests/configs/prom-gen/Dockerfile b/internal/cmd/integration-tests/configs/prom-gen/Dockerfile index d1e0bfdcaf..b56b3f9f0b 100644 --- a/internal/cmd/integration-tests/configs/prom-gen/Dockerfile +++ b/internal/cmd/integration-tests/configs/prom-gen/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21 as build +FROM golang:1.22.1 as build WORKDIR /app/ COPY go.mod go.sum ./ RUN go mod download diff --git a/tools/make/build-container.mk b/tools/make/build-container.mk index 409d284e1f..0daf5becb0 100644 --- a/tools/make/build-container.mk +++ b/tools/make/build-container.mk @@ -34,7 +34,7 @@ # variable names should be passed through to the container. USE_CONTAINER ?= 0 -BUILD_IMAGE_VERSION ?= 0.33.0 +BUILD_IMAGE_VERSION ?= 0.40.2 BUILD_IMAGE ?= grafana/agent-build-image:$(BUILD_IMAGE_VERSION) DOCKER_OPTS ?= -it From f98d41a5a65518dada41ae32d9b2885add61974a Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 12 Mar 2024 16:37:50 +0200 Subject: [PATCH 05/12] otelcolconvert: support converting jaegerremotesampling extension (#6658) Signed-off-by: Paschalis Tsilias --- ...converter_jaegerremotesamplingextension.go | 73 +++++++++++++++++++ .../testdata/jaegerremotesampling.river | 38 ++++++++++ .../testdata/jaegerremotesampling.yaml | 43 +++++++++++ 3 files changed, 154 insertions(+) create mode 100644 internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go create mode 100644 internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.river create mode 100644 internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.yaml diff --git a/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go new file mode 100644 index 0000000000..2076a7290d --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_jaegerremotesamplingextension.go @@ -0,0 +1,73 @@ +package 
otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol/extension/jaeger_remote_sampling" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/jaegerremotesampling" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, jaegerRemoteSamplingExtensionConverter{}) +} + +type jaegerRemoteSamplingExtensionConverter struct{} + +func (jaegerRemoteSamplingExtensionConverter) Factory() component.Factory { + return jaegerremotesampling.NewFactory() +} + +func (jaegerRemoteSamplingExtensionConverter) InputComponentName() string { + return "otelcol.extension.jaeger_remote_sampling" +} + +func (jaegerRemoteSamplingExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toJaegerRemoteSamplingExtension(cfg.(*jaegerremotesampling.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "extension", "jaeger_remote_sampling"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toJaegerRemoteSamplingExtension(cfg *jaegerremotesampling.Config) *jaeger_remote_sampling.Arguments { + if cfg == nil { + return nil + } + + var grpc *jaeger_remote_sampling.GRPCServerArguments + if cfg.GRPCServerSettings != nil { + grpc = (*jaeger_remote_sampling.GRPCServerArguments)(toGRPCServerArguments(cfg.GRPCServerSettings)) + } + var http *jaeger_remote_sampling.HTTPServerArguments + if cfg.HTTPServerSettings != nil { + http = (*jaeger_remote_sampling.HTTPServerArguments)(toHTTPServerArguments(cfg.HTTPServerSettings)) + } + var remote *jaeger_remote_sampling.GRPCClientArguments + if 
cfg.Source.Remote != nil { + r := toGRPCClientArguments(*cfg.Source.Remote) + remote = (*jaeger_remote_sampling.GRPCClientArguments)(&r) + } + + return &jaeger_remote_sampling.Arguments{ + GRPC: grpc, + HTTP: http, + Source: jaeger_remote_sampling.ArgumentsSource{ + Content: "", + Remote: remote, + File: cfg.Source.File, + ReloadInterval: cfg.Source.ReloadInterval, + }, + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.river b/internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.river new file mode 100644 index 0000000000..5d1efebc69 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.river @@ -0,0 +1,38 @@ +otelcol.extension.jaeger_remote_sampling "default" { + grpc { } + + http { } + + source { + remote { + endpoint = "jaeger-collector:14250" + } + reload_interval = "30s" + } +} + +otelcol.receiver.jaeger "default" { + protocols { + grpc { } + + thrift_http { } + + thrift_binary { + max_packet_size = "63KiB488B" + } + + thrift_compact { + max_packet_size = "63KiB488B" + } + } + + output { + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = "database:4317" + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.yaml b/internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.yaml new file mode 100644 index 0000000000..d85b388771 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/jaegerremotesampling.yaml @@ -0,0 +1,43 @@ +extensions: + jaegerremotesampling: + # Our defaults have drifted from upstream so we explicitly set our defaults + # below by adding the 0.0.0.0 prefix for http.endpoint and grpc.endpoint. 
+ http: + endpoint: "0.0.0.0:5778" + grpc: + endpoint: "0.0.0.0:14250" + source: + reload_interval: 30s + remote: + endpoint: jaeger-collector:14250 + # Our defaults have drifted from upstream so we explicitly set our + # defaults below for the remote block that is used as GRPC client + # arguments (balancer_name, compression, write_buffer_size). + balancer_name: pick_first + compression: "gzip" + write_buffer_size: 524288 # 512 * 1024 + +receivers: + jaeger: + protocols: + grpc: + thrift_binary: + thrift_compact: + thrift_http: + +exporters: + otlp: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). + endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +service: + extensions: [jaegerremotesampling] + pipelines: + traces: + receivers: [jaeger] + processors: [] + exporters: [otlp] From d550490f9b6c40f1d3e1c786f99bbaef5e1415a2 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Tue, 12 Mar 2024 18:20:32 +0200 Subject: [PATCH 06/12] go.mod: retract accidentally pushed tags (#6662) Signed-off-by: Paschalis Tsilias --- go.mod | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/go.mod b/go.mod index 4eb7edbf00..baa859aaff 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,12 @@ module github.com/grafana/agent go 1.22.1 +retract ( + v1.3.191 // Published accidentally + v1.2.99 // Published accidentally + v1.2.99-rc1 // Published accidentally +) + require ( cloud.google.com/go/pubsub v1.33.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 From 24c0432f582b12b5eae83aceaff8a04340240c1e Mon Sep 17 00:00:00 2001 From: Erik Baranowski <39704712+erikbaranowski@users.noreply.github.com> Date: Tue, 12 Mar 2024 14:48:35 -0400 Subject: [PATCH 07/12] Improve converter diagnostic output (#6549) * Improve converter diagnostic output by including a Footer and removing lower level diagnostics when a configuration fails to generate. 
Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --------- Signed-off-by: erikbaranowski <39704712+erikbaranowski@users.noreply.github.com> --- CHANGELOG.md | 4 +- internal/converter/diag/diagnostics.go | 15 ++++- internal/converter/diag/report.go | 39 ++++++++++++- internal/converter/diag/report_test.go | 81 ++++++++++++++++++++++++++ internal/flowmode/cmd_convert.go | 2 +- 5 files changed, 133 insertions(+), 8 deletions(-) create mode 100644 internal/converter/diag/report_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index a3db59532c..04362c9e0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,9 @@ Main (unreleased) - Add support for importing directories as single module to `import.git`. (@wildum) +- Improve converter diagnostic output by including a Footer and removing lower + level diagnostics when a configuration fails to generate. (@erikbaranowski) + ### Features - Added a new CLI flag `--stability.level` which defines the minimum stability @@ -51,7 +54,6 @@ v0.40.2 (2024-03-05) - Fix an issue where Loki could reject a batch of logs when structured metadata feature is used. (@thampiotr) -======= - Fix a duplicate metrics registration panic when recreating static mode metric instance's write handler. 
(@rfratto, @hainenber) diff --git a/internal/converter/diag/diagnostics.go b/internal/converter/diag/diagnostics.go index 6c3fcfba45..94f416e6dd 100644 --- a/internal/converter/diag/diagnostics.go +++ b/internal/converter/diag/diagnostics.go @@ -46,12 +46,12 @@ func (ds Diagnostics) Error() string { return sb.String() } -func (ds Diagnostics) GenerateReport(writer io.Writer, reportType string) error { +func (ds Diagnostics) GenerateReport(writer io.Writer, reportType string, bypassErrors bool) error { switch reportType { case Text: - return generateTextReport(writer, ds) + return generateTextReport(writer, ds, bypassErrors) default: - return fmt.Errorf("Invalid diagnostic report type %q", reportType) + return fmt.Errorf("invalid diagnostic report type %q", reportType) } } @@ -66,3 +66,12 @@ func (ds *Diagnostics) RemoveDiagsBySeverity(severity Severity) { *ds = newDiags } + +func (ds *Diagnostics) HasSeverityLevel(severity Severity) bool { + for _, diag := range *ds { + if diag.Severity == severity { + return true + } + } + return false +} diff --git a/internal/converter/diag/report.go b/internal/converter/diag/report.go index 89d2a0e2b7..a41ed02ecd 100644 --- a/internal/converter/diag/report.go +++ b/internal/converter/diag/report.go @@ -6,10 +6,23 @@ import ( const Text = ".txt" -// generateTextReport generates a text report for the diagnostics. -func generateTextReport(writer io.Writer, ds Diagnostics) error { - content := ds.Error() +const criticalErrorFooter = ` + +A configuration file was not generated due to critical issues. Refer to the critical messages for more information.` + +const errorFooter = ` + +A configuration file was not generated due to errors. Refer to the error messages for more information. + +You can bypass the errors by using the --bypass-errors flag. 
Bypassing errors isn't recommended for production environments.` + +const successFooter = ` +A configuration file was generated successfully.` + +// generateTextReport generates a text report for the diagnostics. +func generateTextReport(writer io.Writer, ds Diagnostics, bypassErrors bool) error { + content := getContent(ds, bypassErrors) _, err := writer.Write([]byte(content)) if err != nil { return err @@ -17,3 +30,23 @@ func generateTextReport(writer io.Writer, ds Diagnostics) error { return nil } + +// getContent returns the formatted content for the report based on the diagnostics and bypassErrors. +func getContent(ds Diagnostics, bypassErrors bool) string { + var content string + switch { + case ds.HasSeverityLevel(SeverityLevelCritical): + content = criticalErrorFooter + ds.RemoveDiagsBySeverity(SeverityLevelInfo) + ds.RemoveDiagsBySeverity(SeverityLevelWarn) + ds.RemoveDiagsBySeverity(SeverityLevelError) + case ds.HasSeverityLevel(SeverityLevelError) && !bypassErrors: + content = errorFooter + ds.RemoveDiagsBySeverity(SeverityLevelInfo) + ds.RemoveDiagsBySeverity(SeverityLevelWarn) + default: + content = successFooter + } + + return ds.Error() + content +} diff --git a/internal/converter/diag/report_test.go b/internal/converter/diag/report_test.go new file mode 100644 index 0000000000..4bca751594 --- /dev/null +++ b/internal/converter/diag/report_test.go @@ -0,0 +1,81 @@ +package diag + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestDiagReporting(t *testing.T) { + var ( + criticalDiagnostic = Diagnostic{ + Severity: SeverityLevelCritical, + Summary: "this is a critical diag", + } + errorDiagnostic = Diagnostic{ + Severity: SeverityLevelError, + Summary: "this is an error diag", + } + warnDiagnostic = Diagnostic{ + Severity: SeverityLevelWarn, + Summary: "this is a warn diag", + } + infoDiagnostic = Diagnostic{ + Severity: SeverityLevelInfo, + Summary: "this is an info diag", + } + ) + + tt := []struct { + name 
string + diags Diagnostics + bypassErrors bool + expectedMessage string + }{ + { + name: "Empty", + diags: Diagnostics{}, + expectedMessage: successFooter, + }, + { + name: "Critical", + diags: Diagnostics{criticalDiagnostic, errorDiagnostic, warnDiagnostic, infoDiagnostic}, + expectedMessage: `(Critical) this is a critical diag` + criticalErrorFooter, + }, + { + name: "Error", + diags: Diagnostics{errorDiagnostic, warnDiagnostic, infoDiagnostic}, + expectedMessage: `(Error) this is an error diag` + errorFooter, + }, + { + name: "Bypass Error", + diags: Diagnostics{errorDiagnostic, warnDiagnostic, infoDiagnostic}, + bypassErrors: true, + expectedMessage: `(Error) this is an error diag +(Warning) this is a warn diag +(Info) this is an info diag` + successFooter, + }, + { + name: "Warn", + diags: Diagnostics{warnDiagnostic, infoDiagnostic}, + expectedMessage: `(Warning) this is a warn diag +(Info) this is an info diag` + successFooter, + }, + { + name: "Info", + diags: Diagnostics{infoDiagnostic}, + expectedMessage: `(Info) this is an info diag` + successFooter, + }, + } + + for _, tc := range tt { + t.Run(tc.name, func(t *testing.T) { + var buf bytes.Buffer + err := generateTextReport(&buf, tc.diags, tc.bypassErrors) + require.NoError(t, err) + + require.Equal(t, tc.expectedMessage, buf.String()) + }) + } +} diff --git a/internal/flowmode/cmd_convert.go b/internal/flowmode/cmd_convert.go index bffc9ef1f3..1150ef0b16 100644 --- a/internal/flowmode/cmd_convert.go +++ b/internal/flowmode/cmd_convert.go @@ -163,7 +163,7 @@ func generateConvertReport(diags convert_diag.Diagnostics, fc *flowConvert) erro } defer file.Close() - return diags.GenerateReport(file, convert_diag.Text) + return diags.GenerateReport(file, convert_diag.Text, fc.bypassErrors) } return nil From 62f8337aefb5dac54fab9430a66c9c239d033dc2 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 13 Mar 2024 11:40:54 +0200 Subject: [PATCH 08/12] otelcolconvert: support converting oauth2 client auth 
extension (#6657) Signed-off-by: Paschalis Tsilias --- .../components/otelcol.auth.oauth2.md | 13 +++- .../component/otelcol/auth/oauth2/oauth2.go | 32 +++++----- .../converter_oauth2clientauthextension.go | 53 ++++++++++++++++ .../otelcolconvert/testdata/oauth2.river | 44 +++++++++++++ .../otelcolconvert/testdata/oauth2.yaml | 61 +++++++++++++++++++ 5 files changed, 187 insertions(+), 16 deletions(-) create mode 100644 internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go create mode 100644 internal/converter/internal/otelcolconvert/testdata/oauth2.river create mode 100644 internal/converter/internal/otelcolconvert/testdata/oauth2.yaml diff --git a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md b/docs/sources/flow/reference/components/otelcol.auth.oauth2.md index 4584f47eb7..c58d93e56d 100644 --- a/docs/sources/flow/reference/components/otelcol.auth.oauth2.md +++ b/docs/sources/flow/reference/components/otelcol.auth.oauth2.md @@ -39,8 +39,10 @@ otelcol.auth.oauth2 "LABEL" { Name | Type | Description | Default | Required ---- | ---- | ----------- | ------- | -------- -`client_id` | `string` | The client identifier issued to the client. | | yes -`client_secret` | `secret` | The secret string associated with the client identifier. | | yes +`client_id` | `string` | The client identifier issued to the client. | | no +`client_id_file` | `string` | The file path to retrieve the client identifier issued to the client. | | no +`client_secret` | `secret` | The secret string associated with the client identifier. | | no +`client_secret_file` | `secret` | The file path to retrieve the secret string associated with the client identifier. | | no `token_url` | `string` | The server endpoint URL from which to get tokens. | | yes `endpoint_params` | `map(list(string))` | Additional parameters that are sent to the token endpoint. | `{}` | no `scopes` | `list(string)` | Requested permissions associated for the client. 
| `[]` | no @@ -48,6 +50,13 @@ Name | Type | Description | Default | Required The `timeout` argument is used both for requesting initial tokens and for refreshing tokens. `"0s"` implies no timeout. +At least one of the `client_id` and `client_id_file` pair of arguments must be +set. In case both are set, `client_id_file` takes precedence. + +Similarly, at least one of the `client_secret` and `client_secret_file` pair of +arguments must be set. In case both are set, `client_secret_file` also takes +precedence. + ## Blocks The following blocks are supported inside the definition of diff --git a/internal/component/otelcol/auth/oauth2/oauth2.go b/internal/component/otelcol/auth/oauth2/oauth2.go index 95f88c5072..b90164711b 100644 --- a/internal/component/otelcol/auth/oauth2/oauth2.go +++ b/internal/component/otelcol/auth/oauth2/oauth2.go @@ -31,13 +31,15 @@ func init() { // Arguments configures the otelcol.auth.oauth2 component. type Arguments struct { - ClientID string `river:"client_id,attr"` - ClientSecret rivertypes.Secret `river:"client_secret,attr"` - TokenURL string `river:"token_url,attr"` - EndpointParams url.Values `river:"endpoint_params,attr,optional"` - Scopes []string `river:"scopes,attr,optional"` - TLSSetting otelcol.TLSClientArguments `river:"tls,block,optional"` - Timeout time.Duration `river:"timeout,attr,optional"` + ClientID string `river:"client_id,attr,optional"` + ClientIDFile string `river:"client_id_file,attr,optional"` + ClientSecret rivertypes.Secret `river:"client_secret,attr,optional"` + ClientSecretFile string `river:"client_secret_file,attr,optional"` + TokenURL string `river:"token_url,attr"` + EndpointParams url.Values `river:"endpoint_params,attr,optional"` + Scopes []string `river:"scopes,attr,optional"` + TLSSetting otelcol.TLSClientArguments `river:"tls,block,optional"` + Timeout time.Duration `river:"timeout,attr,optional"` } var _ auth.Arguments = Arguments{} @@ -45,13 +47,15 @@ var _ auth.Arguments = Arguments{} // Convert 
implements auth.Arguments. func (args Arguments) Convert() (otelcomponent.Config, error) { return &oauth2clientauthextension.Config{ - ClientID: args.ClientID, - ClientSecret: configopaque.String(args.ClientSecret), - TokenURL: args.TokenURL, - EndpointParams: args.EndpointParams, - Scopes: args.Scopes, - TLSSetting: *args.TLSSetting.Convert(), - Timeout: args.Timeout, + ClientID: args.ClientID, + ClientIDFile: args.ClientIDFile, + ClientSecret: configopaque.String(args.ClientSecret), + ClientSecretFile: args.ClientSecretFile, + TokenURL: args.TokenURL, + EndpointParams: args.EndpointParams, + Scopes: args.Scopes, + TLSSetting: *args.TLSSetting.Convert(), + Timeout: args.Timeout, }, nil } diff --git a/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go new file mode 100644 index 0000000000..14ba01ea91 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_oauth2clientauthextension.go @@ -0,0 +1,53 @@ +package otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol/auth/oauth2" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/grafana/river/rivertypes" + "github.com/open-telemetry/opentelemetry-collector-contrib/extension/oauth2clientauthextension" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, oauth2ClientAuthExtensionConverter{}) +} + +type oauth2ClientAuthExtensionConverter struct{} + +func (oauth2ClientAuthExtensionConverter) Factory() component.Factory { + return oauth2clientauthextension.NewFactory() +} + +func (oauth2ClientAuthExtensionConverter) InputComponentName() string { return "otelcol.auth.oauth2" } + +func (oauth2ClientAuthExtensionConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags 
diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toOAuth2ClientAuthExtension(cfg.(*oauth2clientauthextension.Config)) + block := common.NewBlockWithOverride([]string{"otelcol", "auth", "oauth2"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toOAuth2ClientAuthExtension(cfg *oauth2clientauthextension.Config) *oauth2.Arguments { + return &oauth2.Arguments{ + ClientID: cfg.ClientID, + ClientSecret: rivertypes.Secret(cfg.ClientSecret), + TokenURL: cfg.TokenURL, + EndpointParams: cfg.EndpointParams, + Scopes: cfg.Scopes, + TLSSetting: toTLSClientArguments(cfg.TLSSetting), + Timeout: cfg.Timeout, + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/oauth2.river b/internal/converter/internal/otelcolconvert/testdata/oauth2.river new file mode 100644 index 0000000000..9a125399bc --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/oauth2.river @@ -0,0 +1,44 @@ +otelcol.auth.oauth2 "default" { + client_id = "someclientid" + client_secret = "someclientsecret" + token_url = "https://example.com/oauth2/default/v1/token" + endpoint_params = { + audience = ["someaudience"], + } + scopes = ["api.metrics"] + + tls { + ca_file = "/var/lib/mycert.pem" + cert_file = "certfile" + key_file = "keyfile" + insecure = true + } + timeout = "2s" +} + +otelcol.receiver.otlp "default" { + grpc { } + + output { + metrics = [otelcol.exporter.otlp.default_withauth.input, otelcol.exporter.otlphttp.default_noauth.input] + logs = [otelcol.exporter.otlp.default_withauth.input, otelcol.exporter.otlphttp.default_noauth.input] + traces = [otelcol.exporter.otlp.default_withauth.input, otelcol.exporter.otlphttp.default_noauth.input] + } +} + +otelcol.exporter.otlp "default_withauth" { + client { + endpoint = "database:4317" + + tls { + ca_file = "/tmp/certs/ca.pem" + } + auth = 
otelcol.auth.oauth2.default.handler + } +} + +otelcol.exporter.otlphttp "default_noauth" { + client { + endpoint = "database:4318" + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/oauth2.yaml b/internal/converter/internal/otelcolconvert/testdata/oauth2.yaml new file mode 100644 index 0000000000..d337d40bca --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/oauth2.yaml @@ -0,0 +1,61 @@ +extensions: + oauth2client/noop: # this extension is not defined in services and shouldn't be converted + client_id: dummyclientid + client_secret: dummyclientsecret + token_url: https://example.com/oauth2/default/v1/token + oauth2client: + client_id: someclientid + client_secret: someclientsecret + endpoint_params: + audience: someaudience + token_url: https://example.com/oauth2/default/v1/token + scopes: ["api.metrics"] + # tls settings for the token client + tls: + insecure: true + ca_file: /var/lib/mycert.pem + cert_file: certfile + key_file: keyfile + # timeout for the token client + timeout: 2s + +receivers: + otlp: + protocols: + grpc: + +exporters: + otlphttp/noauth: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below for queue_size. + endpoint: database:4318 + sending_queue: + queue_size: 5000 + + otlp/withauth: + tls: + ca_file: /tmp/certs/ca.pem + auth: + authenticator: oauth2client + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). 
+ endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +service: + extensions: [oauth2client] + pipelines: + metrics: + receivers: [otlp] + processors: [] + exporters: [otlp/withauth, otlphttp/noauth] + logs: + receivers: [otlp] + processors: [] + exporters: [otlp/withauth, otlphttp/noauth] + traces: + receivers: [otlp] + processors: [] + exporters: [otlp/withauth, otlphttp/noauth] From 954f41122b2fa387ef5e6b2d5845009925d9d169 Mon Sep 17 00:00:00 2001 From: Piotr <17101802+thampiotr@users.noreply.github.com> Date: Wed, 13 Mar 2024 11:04:11 +0000 Subject: [PATCH 09/12] Attempt to fix main: separate clean in windows docker images (#6671) --- cmd/grafana-agent/Dockerfile.windows | 7 ++++++- cmd/grafana-agentctl/Dockerfile.windows | 8 +++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/cmd/grafana-agent/Dockerfile.windows b/cmd/grafana-agent/Dockerfile.windows index 7ace535312..1ebc3346f1 100644 --- a/cmd/grafana-agent/Dockerfile.windows +++ b/cmd/grafana-agent/Dockerfile.windows @@ -10,7 +10,12 @@ SHELL ["cmd", "/S", "/C"] # Creating new layers can be really slow on Windows so we clean up any caches # we can before moving on to the next step. RUN ""C:\Program Files\git\bin\bash.exe" -c "RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} make generate-ui && rm -rf web/ui/node_modules && yarn cache clean --all"" -RUN ""C:\Program Files\git\bin\bash.exe" -c "RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} GO_TAGS='builtinassets' make agent && go clean -cache -modcache"" + +RUN ""C:\Program Files\git\bin\bash.exe" -c "RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} GO_TAGS='builtinassets' make agent"" +# In this case, we're separating the clean command from make agent to avoid an issue where access to some mod cache +# files is denied immediately after make agent, for example: +# "go: remove C:\go\pkg\mod\golang.org\toolchain@v0.0.1-go1.22.1.windows-amd64\bin\go.exe: Access is denied." 
+RUN ""C:\Program Files\git\bin\bash.exe" -c "go clean -cache -modcache"" # Use the smallest container possible for the final image FROM mcr.microsoft.com/windows/nanoserver:1809 diff --git a/cmd/grafana-agentctl/Dockerfile.windows b/cmd/grafana-agentctl/Dockerfile.windows index 7e4e502d30..77df750b9d 100644 --- a/cmd/grafana-agentctl/Dockerfile.windows +++ b/cmd/grafana-agentctl/Dockerfile.windows @@ -7,9 +7,11 @@ WORKDIR /src/agent SHELL ["cmd", "/S", "/C"] -# Creating new layers can be really slow on Windows so we clean up any caches -# we can before moving on to the next step. -RUN ""C:\Program Files\git\bin\bash.exe" -c "RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} make agentctl && go clean -cache -modcache"" +RUN ""C:\Program Files\git\bin\bash.exe" -c "RELEASE_BUILD=${RELEASE_BUILD} VERSION=${VERSION} make agentctl"" +# We're separating the clean command from make agent to avoid an issue where access to some mod cache +# files is denied immediately after make agentctl, for example: +# "go: remove C:\go\pkg\mod\golang.org\toolchain@v0.0.1-go1.22.1.windows-amd64\bin\go.exe: Access is denied." +RUN ""C:\Program Files\git\bin\bash.exe" -c "go clean -cache -modcache"" # Use the smallest container possible for the final image FROM mcr.microsoft.com/windows/nanoserver:1809 From 677b6877abe27ec5fa6b9ed35880c4e1952477f6 Mon Sep 17 00:00:00 2001 From: mattdurham Date: Wed, 13 Mar 2024 06:34:00 -0700 Subject: [PATCH 10/12] Change git operations from fetch to pull (#6668) * Update to using FETCH_HEAD since we are using FETCH which doesnt update HEAD. * Update to using FETCH_HEAD since we are using FETCH which doesnt update HEAD. 
* Use pull and not fetch * add comments * fix linting * Update CHANGELOG.md Co-authored-by: Robert Fratto --------- Co-authored-by: Robert Fratto --- CHANGELOG.md | 2 + .../flow/reference/components/module.git.md | 12 +-- internal/flow/import_test.go | 96 +++++++++++++++++++ internal/vcs/git.go | 25 +++-- 4 files changed, 120 insertions(+), 15 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 04362c9e0a..3bde8a8576 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,8 @@ Main (unreleased) - Fix a bug where structured metadata and parsed field are not passed further in `loki.source.api` (@marchellodev) +- Change `import.git` to use Git pulls rather than fetches to fix scenarios where the local code did not get updated. (@mattdurham) + ### Other changes - Clustering for Grafana Agent in Flow mode has graduated from beta to stable. diff --git a/docs/sources/flow/reference/components/module.git.md b/docs/sources/flow/reference/components/module.git.md index 44bdee36a0..90b8dae130 100644 --- a/docs/sources/flow/reference/components/module.git.md +++ b/docs/sources/flow/reference/components/module.git.md @@ -41,12 +41,12 @@ module.git "LABEL" { The following arguments are supported: -Name | Type | Description | Default | Required ----- | ---- | ----------- | ------- | -------- -`repository` | `string` | The Git repository address to retrieve the module from. | | yes -`revision` | `string` | The Git revision to retrieve the module from. | `"HEAD"` | no -`path` | `string` | The path in the repository where the module is stored. | | yes -`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no +Name | Type | Description | Default | Required +-----------------|------------|---------------------------------------------------------|----------|--------- +`repository` | `string` | The Git repository address to retrieve the module from. | | yes +`revision` | `string` | The Git revision to retrieve the module from. 
| `"HEAD"` | no +`path` | `string` | The path in the repository where the module is stored. | | yes +`pull_frequency` | `duration` | The frequency to pull the repository for updates. | `"60s"` | no The `repository` attribute must be set to a repository address that would be recognized by Git with a `git clone REPOSITORY_ADDRESS` command, such as diff --git a/internal/flow/import_test.go b/internal/flow/import_test.go index 3d2e7a47f7..2d2b972aa2 100644 --- a/internal/flow/import_test.go +++ b/internal/flow/import_test.go @@ -4,6 +4,7 @@ import ( "context" "io/fs" "os" + "os/exec" "path/filepath" "strings" "sync" @@ -250,6 +251,101 @@ func TestImportError(t *testing.T) { } } +func TestPullUpdating(t *testing.T) { + // Previously we used fetch instead of pull, which would set the FETCH_HEAD but not HEAD + // This caused changes not to propagate if there were changes, since HEAD was pinned to whatever it was on the initial download. + // Switching to pull removes this problem at the expense of network bandwidth. + // Tried switching to FETCH_HEAD but FETCH_HEAD is only set on fetch and not initial repo clone so we would need to + // remember to always call fetch after clone. + // + // This test ensures we can pull the correct values down if they update no matter what, it works by creating a local + // file based git repo then committing a file, running the component, then updating the file in the repo. 
+ testRepo := t.TempDir() + + contents := `declare "add" { + argument "a" {} + argument "b" {} + + export "sum" { + value = argument.a.value + argument.b.value + } +}` + main := ` +import.git "testImport" { + repository = "` + testRepo + `" + path = "math.river" + pull_frequency = "5s" +} + +testImport.add "cc" { + a = 1 + b = 1 +} +` + init := exec.Command("git", "init", testRepo) + err := init.Run() + require.NoError(t, err) + math := filepath.Join(testRepo, "math.river") + err = os.WriteFile(math, []byte(contents), 0666) + require.NoError(t, err) + add := exec.Command("git", "add", ".") + add.Dir = testRepo + err = add.Run() + require.NoError(t, err) + commit := exec.Command("git", "commit", "-m \"test\"") + commit.Dir = testRepo + err = commit.Run() + require.NoError(t, err) + + defer verifyNoGoroutineLeaks(t) + ctrl, f := setup(t, main) + err = ctrl.LoadSource(f, nil) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + + var wg sync.WaitGroup + defer func() { + cancel() + wg.Wait() + }() + + wg.Add(1) + go func() { + defer wg.Done() + ctrl.Run(ctx) + }() + + // Check for initial condition + require.Eventually(t, func() bool { + export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") + return export["sum"] == 2 + }, 3*time.Second, 10*time.Millisecond) + + contentsMore := `declare "add" { + argument "a" {} + argument "b" {} + + export "sum" { + value = argument.a.value + argument.b.value + 1 + } +}` + err = os.WriteFile(math, []byte(contentsMore), 0666) + require.NoError(t, err) + add2 := exec.Command("git", "add", ".") + add2.Dir = testRepo + add2.Run() + + commit2 := exec.Command("git", "commit", "-m \"test2\"") + commit2.Dir = testRepo + commit2.Run() + + // Check for final condition. 
+ require.Eventually(t, func() bool { + export := getExport[map[string]interface{}](t, ctrl, "", "testImport.add.cc") + return export["sum"] == 3 + }, 20*time.Second, 1*time.Millisecond) +} + func testConfig(t *testing.T, config string, reloadConfig string, update func()) { defer verifyNoGoroutineLeaks(t) ctrl, f := setup(t, config) diff --git a/internal/vcs/git.go b/internal/vcs/git.go index 903826c45d..788c1029f7 100644 --- a/internal/vcs/git.go +++ b/internal/vcs/git.go @@ -30,7 +30,7 @@ type GitRepo struct { // managed at storagePath. // // If storagePath is empty on disk, NewGitRepo initializes GitRepo by cloning -// the repository. Otherwise, NewGitRepo will do a fetch. +// the repository. Otherwise, NewGitRepo will do a pull. // // After GitRepo is initialized, it checks out to the Revision specified in // GitRepoOptions. @@ -58,13 +58,20 @@ func NewGitRepo(ctx context.Context, storagePath string, opts GitRepoOptions) (* } } - // Fetch the latest contents. This may be a no-op if we just did a clone. - fetchRepoErr := repo.FetchContext(ctx, &git.FetchOptions{ + // Pulls the latest contents. This may be a no-op if we just did a clone. 
+ wt, err := repo.Worktree() + if err != nil { + return nil, DownloadFailedError{ + Repository: opts.Repository, + Inner: err, + } + } + pullRepoErr := wt.PullContext(ctx, &git.PullOptions{ RemoteName: "origin", Force: true, Auth: opts.Auth.Convert(), }) - if fetchRepoErr != nil && !errors.Is(fetchRepoErr, git.NoErrAlreadyUpToDate) { + if pullRepoErr != nil && !errors.Is(pullRepoErr, git.NoErrAlreadyUpToDate) { workTree, err := repo.Worktree() if err != nil { return nil, err @@ -75,7 +82,7 @@ func NewGitRepo(ctx context.Context, storagePath string, opts GitRepoOptions) (* workTree: workTree, }, UpdateFailedError{ Repository: opts.Repository, - Inner: fetchRepoErr, + Inner: pullRepoErr, } } @@ -109,19 +116,19 @@ func isRepoCloned(dir string) bool { return dirError == nil && len(fi) > 0 } -// Update updates the repository by fetching new content and re-checking out to +// Update updates the repository by pulling new content and re-checking out to // latest version of Revision. func (repo *GitRepo) Update(ctx context.Context) error { var err error - fetchRepoErr := repo.repo.FetchContext(ctx, &git.FetchOptions{ + pullRepoErr := repo.workTree.PullContext(ctx, &git.PullOptions{ RemoteName: "origin", Force: true, Auth: repo.opts.Auth.Convert(), }) - if fetchRepoErr != nil && !errors.Is(fetchRepoErr, git.NoErrAlreadyUpToDate) { + if pullRepoErr != nil && !errors.Is(pullRepoErr, git.NoErrAlreadyUpToDate) { return UpdateFailedError{ Repository: repo.opts.Repository, - Inner: fetchRepoErr, + Inner: pullRepoErr, } } From 3b24a9d954413f91155b0806e6e80cec6831cbf1 Mon Sep 17 00:00:00 2001 From: Paschalis Tsilias Date: Wed, 13 Mar 2024 17:13:29 +0200 Subject: [PATCH 11/12] otelcolconvert: support converting transform processor (#6521) Signed-off-by: Paschalis Tsilias --- .../otelcol/processor/transform/transform.go | 14 ++-- .../converter_transformprocessor.go | 75 +++++++++++++++++++ .../otelcolconvert/testdata/transform.river | 62 +++++++++++++++ 
.../otelcolconvert/testdata/transform.yaml | 75 +++++++++++++++++++ 4 files changed, 219 insertions(+), 7 deletions(-) create mode 100644 internal/converter/internal/otelcolconvert/converter_transformprocessor.go create mode 100644 internal/converter/internal/otelcolconvert/testdata/transform.river create mode 100644 internal/converter/internal/otelcolconvert/testdata/transform.yaml diff --git a/internal/component/otelcol/processor/transform/transform.go b/internal/component/otelcol/processor/transform/transform.go index 708ce7cdc4..aabae21e4c 100644 --- a/internal/component/otelcol/processor/transform/transform.go +++ b/internal/component/otelcol/processor/transform/transform.go @@ -53,9 +53,9 @@ func (c *ContextID) UnmarshalText(text []byte) error { } } -type contextStatementsSlice []contextStatements +type ContextStatementsSlice []ContextStatements -type contextStatements struct { +type ContextStatements struct { Context ContextID `river:"context,attr"` Statements []string `river:"statements,attr"` } @@ -64,9 +64,9 @@ type contextStatements struct { type Arguments struct { // ErrorMode determines how the processor reacts to errors that occur while processing a statement. ErrorMode ottl.ErrorMode `river:"error_mode,attr,optional"` - TraceStatements contextStatementsSlice `river:"trace_statements,block,optional"` - MetricStatements contextStatementsSlice `river:"metric_statements,block,optional"` - LogStatements contextStatementsSlice `river:"log_statements,block,optional"` + TraceStatements ContextStatementsSlice `river:"trace_statements,block,optional"` + MetricStatements ContextStatementsSlice `river:"metric_statements,block,optional"` + LogStatements ContextStatementsSlice `river:"log_statements,block,optional"` // Output configures where to send processed data. Required. 
Output *otelcol.ConsumerArguments `river:"output,block"` @@ -95,7 +95,7 @@ func (args *Arguments) Validate() error { return otelArgs.Validate() } -func (stmts *contextStatementsSlice) convert() []interface{} { +func (stmts *ContextStatementsSlice) convert() []interface{} { if stmts == nil { return nil } @@ -112,7 +112,7 @@ func (stmts *contextStatementsSlice) convert() []interface{} { return res } -func (args *contextStatements) convert() map[string]interface{} { +func (args *ContextStatements) convert() map[string]interface{} { if args == nil { return nil } diff --git a/internal/converter/internal/otelcolconvert/converter_transformprocessor.go b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go new file mode 100644 index 0000000000..694046bb21 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/converter_transformprocessor.go @@ -0,0 +1,75 @@ +package otelcolconvert + +import ( + "fmt" + + "github.com/grafana/agent/internal/component/otelcol" + "github.com/grafana/agent/internal/component/otelcol/processor/transform" + "github.com/grafana/agent/internal/converter/diag" + "github.com/grafana/agent/internal/converter/internal/common" + "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor" + "go.opentelemetry.io/collector/component" +) + +func init() { + converters = append(converters, transformProcessorConverter{}) +} + +type transformProcessorConverter struct{} + +func (transformProcessorConverter) Factory() component.Factory { + return transformprocessor.NewFactory() +} + +func (transformProcessorConverter) InputComponentName() string { + return "otelcol.processor.transform" +} + +func (transformProcessorConverter) ConvertAndAppend(state *state, id component.InstanceID, cfg component.Config) diag.Diagnostics { + var diags diag.Diagnostics + + label := state.FlowComponentLabel() + + args := toTransformProcessor(state, id, cfg.(*transformprocessor.Config)) + block := 
common.NewBlockWithOverride([]string{"otelcol", "processor", "transform"}, label, args) + + diags.Add( + diag.SeverityLevelInfo, + fmt.Sprintf("Converted %s into %s", stringifyInstanceID(id), stringifyBlock(block)), + ) + + state.Body().AppendBlock(block) + return diags +} + +func toTransformProcessor(state *state, id component.InstanceID, cfg *transformprocessor.Config) *transform.Arguments { + var ( + nextMetrics = state.Next(id, component.DataTypeMetrics) + nextLogs = state.Next(id, component.DataTypeLogs) + nextTraces = state.Next(id, component.DataTypeTraces) + ) + + return &transform.Arguments{ + ErrorMode: cfg.ErrorMode, + TraceStatements: toContextStatements(encodeMapslice(cfg.TraceStatements)), + MetricStatements: toContextStatements(encodeMapslice(cfg.MetricStatements)), + LogStatements: toContextStatements(encodeMapslice(cfg.LogStatements)), + Output: &otelcol.ConsumerArguments{ + Metrics: toTokenizedConsumers(nextMetrics), + Logs: toTokenizedConsumers(nextLogs), + Traces: toTokenizedConsumers(nextTraces), + }, + } +} + +func toContextStatements(in []map[string]any) []transform.ContextStatements { + res := make([]transform.ContextStatements, 0, len(in)) + for _, s := range in { + res = append(res, transform.ContextStatements{ + Context: transform.ContextID(encodeString(s["context"])), + Statements: s["statements"].([]string), + }) + } + + return res +} diff --git a/internal/converter/internal/otelcolconvert/testdata/transform.river b/internal/converter/internal/otelcolconvert/testdata/transform.river new file mode 100644 index 0000000000..5b7902f04f --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/transform.river @@ -0,0 +1,62 @@ +otelcol.receiver.otlp "default" { + grpc { } + + http { } + + output { + metrics = [otelcol.processor.transform.default.input] + logs = [otelcol.processor.transform.default.input] + traces = [otelcol.processor.transform.default.input] + } +} + +otelcol.processor.transform "default" { + error_mode = 
"ignore" + + trace_statements { + context = "resource" + statements = ["keep_keys(attributes, [\"service.name\", \"service.namespace\", \"cloud.region\", \"process.command_line\"])", "replace_pattern(attributes[\"process.command_line\"], \"password\\\\=[^\\\\s]*(\\\\s?)\", \"password=***\")", "limit(attributes, 100, [])", "truncate_all(attributes, 4096)"] + } + + trace_statements { + context = "span" + statements = ["set(status.code, 1) where attributes[\"http.path\"] == \"/health\"", "set(name, attributes[\"http.route\"])", "replace_match(attributes[\"http.target\"], \"/user/*/list/*\", \"/user/{userId}/list/{listId}\")", "limit(attributes, 100, [])", "truncate_all(attributes, 4096)"] + } + + metric_statements { + context = "resource" + statements = ["keep_keys(attributes, [\"host.name\"])", "truncate_all(attributes, 4096)"] + } + + metric_statements { + context = "metric" + statements = ["set(description, \"Sum\") where type == \"Sum\""] + } + + metric_statements { + context = "datapoint" + statements = ["limit(attributes, 100, [\"host.name\"])", "truncate_all(attributes, 4096)", "convert_sum_to_gauge() where metric.name == \"system.processes.count\"", "convert_gauge_to_sum(\"cumulative\", false) where metric.name == \"prometheus_metric\""] + } + + log_statements { + context = "resource" + statements = ["keep_keys(attributes, [\"service.name\", \"service.namespace\", \"cloud.region\"])"] + } + + log_statements { + context = "log" + statements = ["set(severity_text, \"FAIL\") where body == \"request failed\"", "replace_all_matches(attributes, \"/user/*/list/*\", \"/user/{userId}/list/{listId}\")", "replace_all_patterns(attributes, \"value\", \"/account/\\\\d{4}\", \"/account/{accountId}\")", "set(body, attributes[\"http.route\"])"] + } + + output { + metrics = [otelcol.exporter.otlp.default.input] + logs = [otelcol.exporter.otlp.default.input] + traces = [otelcol.exporter.otlp.default.input] + } +} + +otelcol.exporter.otlp "default" { + client { + endpoint = 
"database:4317" + } +} diff --git a/internal/converter/internal/otelcolconvert/testdata/transform.yaml b/internal/converter/internal/otelcolconvert/testdata/transform.yaml new file mode 100644 index 0000000000..4bd271d264 --- /dev/null +++ b/internal/converter/internal/otelcolconvert/testdata/transform.yaml @@ -0,0 +1,75 @@ +receivers: + otlp: + protocols: + grpc: + http: + +processors: + transform: + error_mode: ignore + trace_statements: + - context: resource + statements: + - keep_keys(attributes, ["service.name", "service.namespace", "cloud.region", "process.command_line"]) + - replace_pattern(attributes["process.command_line"], "password\\=[^\\s]*(\\s?)", "password=***") + - limit(attributes, 100, []) + - truncate_all(attributes, 4096) + - context: span + statements: + - set(status.code, 1) where attributes["http.path"] == "/health" + - set(name, attributes["http.route"]) + - replace_match(attributes["http.target"], "/user/*/list/*", "/user/{userId}/list/{listId}") + - limit(attributes, 100, []) + - truncate_all(attributes, 4096) + + metric_statements: + - context: resource + statements: + - keep_keys(attributes, ["host.name"]) + - truncate_all(attributes, 4096) + - context: metric + statements: + - set(description, "Sum") where type == "Sum" + - context: datapoint + statements: + - limit(attributes, 100, ["host.name"]) + - truncate_all(attributes, 4096) + - convert_sum_to_gauge() where metric.name == "system.processes.count" + - convert_gauge_to_sum("cumulative", false) where metric.name == "prometheus_metric" + + log_statements: + - context: resource + statements: + - keep_keys(attributes, ["service.name", "service.namespace", "cloud.region"]) + - context: log + statements: + - set(severity_text, "FAIL") where body == "request failed" + - replace_all_matches(attributes, "/user/*/list/*", "/user/{userId}/list/{listId}") + - replace_all_patterns(attributes, "value", "/account/\\d{4}", "/account/{accountId}") + - set(body, attributes["http.route"]) + + 
+exporters: + otlp: + # Our defaults have drifted from upstream, so we explicitly set our + # defaults below (balancer_name and queue_size). + endpoint: database:4317 + balancer_name: pick_first + sending_queue: + queue_size: 5000 + +service: + pipelines: + metrics: + receivers: [otlp] + processors: [transform] + exporters: [otlp] + logs: + receivers: [otlp] + processors: [transform] + exporters: [otlp] + traces: + receivers: [otlp] + processors: [transform] + exporters: [otlp] + From 448b2456b1d7f9b6fd5e9566e53cf893587b3e12 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Wed, 13 Mar 2024 12:23:58 -0400 Subject: [PATCH 12/12] service: allow services to have defined stability levels (#6674) This commit adds stability levels to services. Every service, with the exception of remotecfg, is then given a stability level of stable. remotecfg is given a stability level of "beta." --- internal/component/registry.go | 2 +- internal/flow/flow_services_test.go | 2 + internal/flow/internal/controller/loader.go | 14 +++ .../flow/internal/controller/loader_test.go | 90 +++++++++++++++++++ internal/service/cluster/cluster.go | 2 + internal/service/http/http.go | 2 + internal/service/labelstore/service.go | 2 + internal/service/otel/otel.go | 2 + internal/service/remotecfg/remotecfg.go | 2 + internal/service/service.go | 9 ++ internal/service/ui/ui.go | 2 + 11 files changed, 128 insertions(+), 1 deletion(-) diff --git a/internal/component/registry.go b/internal/component/registry.go index a7c0aca910..b382719bcd 100644 --- a/internal/component/registry.go +++ b/internal/component/registry.go @@ -124,7 +124,7 @@ type Registration struct { // sure the user is not accidentally using a component that is not yet stable - users // need to explicitly enable less-than-stable components via, for example, a command-line flag. // If a component is not stable enough, an attempt to create it via the controller will fail. - // The default stability level is Experimental. 
+ // This field must be set to a non-zero value. Stability featuregate.Stability // An example Arguments value that the registered component expects to diff --git a/internal/flow/flow_services_test.go b/internal/flow/flow_services_test.go index 86e375132f..80404b80f4 100644 --- a/internal/flow/flow_services_test.go +++ b/internal/flow/flow_services_test.go @@ -63,6 +63,7 @@ func TestServices_Configurable(t *testing.T) { return service.Definition{ Name: "fake", ConfigType: ServiceOptions{}, + Stability: featuregate.StabilityBeta, } }, @@ -117,6 +118,7 @@ func TestServices_Configurable_Optional(t *testing.T) { return service.Definition{ Name: "fake", ConfigType: ServiceOptions{}, + Stability: featuregate.StabilityBeta, } }, diff --git a/internal/flow/internal/controller/loader.go b/internal/flow/internal/controller/loader.go index 43e102963e..8921d5ff18 100644 --- a/internal/flow/internal/controller/loader.go +++ b/internal/flow/internal/controller/loader.go @@ -10,6 +10,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/internal/dag" "github.com/grafana/agent/internal/flow/internal/worker" "github.com/grafana/agent/internal/flow/logging/level" @@ -441,6 +442,19 @@ func (l *Loader) populateServiceNodes(g *dag.Graph, serviceBlocks []*ast.BlockSt node := g.GetByID(blockID).(*ServiceNode) + // Don't permit configuring services that have a lower stability level than + // what is currently enabled. + nodeStability := node.Service().Definition().Stability + if err := featuregate.CheckAllowed(nodeStability, l.globals.MinStability, fmt.Sprintf("block %q", blockID)); err != nil { + diags.Add(diag.Diagnostic{ + Severity: diag.SeverityLevelError, + Message: err.Error(), + StartPos: ast.StartPos(block).Position(), + EndPos: ast.EndPos(block).Position(), + }) + continue + } + // Blocks assigned to services are reset to nil in the previous loop. 
// // If the block is non-nil, it means that there was a duplicate block diff --git a/internal/flow/internal/controller/loader_test.go b/internal/flow/internal/controller/loader_test.go index 672e2dce67..398cd5cae0 100644 --- a/internal/flow/internal/controller/loader_test.go +++ b/internal/flow/internal/controller/loader_test.go @@ -1,6 +1,7 @@ package controller_test import ( + "context" "errors" "os" "strings" @@ -11,6 +12,7 @@ import ( "github.com/grafana/agent/internal/flow/internal/controller" "github.com/grafana/agent/internal/flow/internal/dag" "github.com/grafana/agent/internal/flow/logging" + "github.com/grafana/agent/internal/service" "github.com/grafana/river/ast" "github.com/grafana/river/diag" "github.com/grafana/river/parser" @@ -316,6 +318,60 @@ func TestLoader(t *testing.T) { }) } +func TestLoader_Services(t *testing.T) { + testFile := ` + testsvc { } + ` + + testService := &fakeService{ + DefinitionFunc: func() service.Definition { + return service.Definition{ + Name: "testsvc", + ConfigType: struct { + Name string `river:"name,attr,optional"` + }{}, + Stability: featuregate.StabilityBeta, + } + }, + } + + newLoaderOptionsWithStability := func(stability featuregate.Stability) controller.LoaderOptions { + l, _ := logging.New(os.Stderr, logging.DefaultOptions) + return controller.LoaderOptions{ + ComponentGlobals: controller.ComponentGlobals{ + Logger: l, + TraceProvider: noop.NewTracerProvider(), + DataPath: t.TempDir(), + MinStability: stability, + OnBlockNodeUpdate: func(cn controller.BlockNode) { /* no-op */ }, + Registerer: prometheus.NewRegistry(), + NewModuleController: func(id string) controller.ModuleController { + return nil + }, + }, + Services: []service.Service{testService}, + } + } + + t.Run("Load with service at correct stability level", func(t *testing.T) { + l := controller.NewLoader(newLoaderOptionsWithStability(featuregate.StabilityBeta)) + diags := applyFromContent(t, l, []byte(testFile), nil, nil) + require.NoError(t, 
diags.ErrorOrNil())
+	})
+
+	t.Run("Load with service below minimum stability level", func(t *testing.T) {
+		l := controller.NewLoader(newLoaderOptionsWithStability(featuregate.StabilityStable))
+		diags := applyFromContent(t, l, []byte(testFile), nil, nil)
+		require.ErrorContains(t, diags.ErrorOrNil(), `block "testsvc" is at stability level "beta", which is below the minimum allowed stability level "stable"`)
+	})
+
+	t.Run("Load with undefined minimum stability level", func(t *testing.T) {
+		l := controller.NewLoader(newLoaderOptionsWithStability(featuregate.StabilityUndefined))
+		diags := applyFromContent(t, l, []byte(testFile), nil, nil)
+		require.ErrorContains(t, diags.ErrorOrNil(), `stability levels must be defined: got "beta" as stability of block "testsvc" and as the minimum stability level`)
+	})
+}
+
 // TestScopeWithFailingComponent is used to ensure that the scope is filled out, even if the component
 // fails to properly start.
 func TestScopeWithFailingComponent(t *testing.T) {
@@ -473,3 +529,37 @@ func (f fakeModuleController) ClearModuleIDs() {
 func (f fakeModuleController) NewCustomComponent(id string, export component.ExportFunc) (controller.CustomComponent, error) {
 	return nil, nil
 }
+
+type fakeService struct {
+	DefinitionFunc func() service.Definition // Required.
+ RunFunc func(ctx context.Context, host service.Host) error + UpdateFunc func(newConfig any) error + DataFunc func() any +} + +func (fs *fakeService) Definition() service.Definition { + return fs.DefinitionFunc() +} + +func (fs *fakeService) Run(ctx context.Context, host service.Host) error { + if fs.RunFunc != nil { + return fs.RunFunc(ctx, host) + } + + <-ctx.Done() + return nil +} + +func (fs *fakeService) Update(newConfig any) error { + if fs.UpdateFunc != nil { + return fs.UpdateFunc(newConfig) + } + return nil +} + +func (fs *fakeService) Data() any { + if fs.DataFunc != nil { + return fs.DataFunc() + } + return nil +} diff --git a/internal/service/cluster/cluster.go b/internal/service/cluster/cluster.go index 6c404b65f5..30c7a1cc45 100644 --- a/internal/service/cluster/cluster.go +++ b/internal/service/cluster/cluster.go @@ -15,6 +15,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/agent/internal/component" + "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/service" http_service "github.com/grafana/agent/internal/service/http" @@ -162,6 +163,7 @@ func (s *Service) Definition() service.Definition { // Cluster depends on the HTTP service to work properly. 
http_service.ServiceName, }, + Stability: featuregate.StabilityStable, } } diff --git a/internal/service/http/http.go b/internal/service/http/http.go index 16e9d8449f..26c93e3911 100644 --- a/internal/service/http/http.go +++ b/internal/service/http/http.go @@ -16,6 +16,7 @@ import ( "github.com/go-kit/log" "github.com/gorilla/mux" "github.com/grafana/agent/internal/component" + "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/service" @@ -128,6 +129,7 @@ func (s *Service) Definition() service.Definition { Name: ServiceName, ConfigType: Arguments{}, DependsOn: nil, // http has no dependencies. + Stability: featuregate.StabilityStable, } } diff --git a/internal/service/labelstore/service.go b/internal/service/labelstore/service.go index 6906462606..3a536ff2dc 100644 --- a/internal/service/labelstore/service.go +++ b/internal/service/labelstore/service.go @@ -6,6 +6,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" agent_service "github.com/grafana/agent/internal/service" flow_service "github.com/grafana/agent/internal/service" @@ -67,6 +68,7 @@ func (s *service) Definition() agent_service.Definition { Name: ServiceName, ConfigType: Arguments{}, DependsOn: nil, + Stability: featuregate.StabilityStable, } } diff --git a/internal/service/otel/otel.go b/internal/service/otel/otel.go index f8e4707250..4713abaeaf 100644 --- a/internal/service/otel/otel.go +++ b/internal/service/otel/otel.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/go-kit/log" + "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/service" "github.com/grafana/agent/internal/util" ) @@ -50,6 +51,7 @@ func (*Service) Definition() service.Definition { Name: ServiceName, ConfigType: nil, // otel does not accept configuration DependsOn: []string{}, + 
Stability: featuregate.StabilityStable, } } diff --git a/internal/service/remotecfg/remotecfg.go b/internal/service/remotecfg/remotecfg.go index 318404227d..6ddad9b824 100644 --- a/internal/service/remotecfg/remotecfg.go +++ b/internal/service/remotecfg/remotecfg.go @@ -17,6 +17,7 @@ import ( "github.com/grafana/agent-remote-config/api/gen/proto/go/agent/v1/agentv1connect" "github.com/grafana/agent/internal/agentseed" "github.com/grafana/agent/internal/component/common/config" + "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/flow/logging/level" "github.com/grafana/agent/internal/service" "github.com/grafana/river" @@ -128,6 +129,7 @@ func (s *Service) Definition() service.Definition { Name: ServiceName, ConfigType: Arguments{}, DependsOn: nil, // remotecfg has no dependencies. + Stability: featuregate.StabilityBeta, } } diff --git a/internal/service/service.go b/internal/service/service.go index 344ff11341..62751d464c 100644 --- a/internal/service/service.go +++ b/internal/service/service.go @@ -11,6 +11,7 @@ import ( "fmt" "github.com/grafana/agent/internal/component" + "github.com/grafana/agent/internal/featuregate" ) // Definition describes an individual Flow service. Services have unique names @@ -35,6 +36,14 @@ type Definition struct { // or a named service doesn't exist), it is treated as a fatal // error and the root Flow module will exit. DependsOn []string + + // Stability is the overall stability level of the service. This is used to + // make sure the user is not accidentally configuring a service that is not + // yet stable - users need to explicitly enable less-than-stable services + // via, for example, a command-line flag. If a service is not stable enough, + // an attempt to configure it via the controller will fail. + // This field must be set to a non-zero value. + Stability featuregate.Stability } // Host is a controller for services and Flow components. 
diff --git a/internal/service/ui/ui.go b/internal/service/ui/ui.go index bbf62b748b..ebc56943f8 100644 --- a/internal/service/ui/ui.go +++ b/internal/service/ui/ui.go @@ -8,6 +8,7 @@ import ( "path" "github.com/gorilla/mux" + "github.com/grafana/agent/internal/featuregate" "github.com/grafana/agent/internal/service" http_service "github.com/grafana/agent/internal/service/http" "github.com/grafana/agent/internal/web/api" @@ -46,6 +47,7 @@ func (s *Service) Definition() service.Definition { Name: ServiceName, ConfigType: nil, // ui does not accept configuration DependsOn: []string{http_service.ServiceName}, + Stability: featuregate.StabilityStable, } }