From 3ed09289837c45a89e2bb97c6c270d79f3b88065 Mon Sep 17 00:00:00 2001 From: Yuri Shkuro Date: Thu, 22 Jun 2023 13:04:52 -0400 Subject: [PATCH 1/8] [test] Avoid logging to testing.T from server goroutine (#4546) ## Which problem is this PR solving? - Resolves #4497 ## Short description of the changes - Do not use `zaptest.NewLogger(t)` because it causes race condition shown in the ticket when the server goroutine tries to log something that is being forwarded to `testing.T` while the test is being shutdown due to panic. - I was not able to get to the root cause why this happens, since the test is properly shutting down the server. This may indicate an issue in testing itself in how it handles panic. Signed-off-by: Yuri Shkuro --- cmd/collector/app/server/http_test.go | 32 ++++++++------------------- 1 file changed, 9 insertions(+), 23 deletions(-) diff --git a/cmd/collector/app/server/http_test.go b/cmd/collector/app/server/http_test.go index 6f2742d233e..acfd4a80a11 100644 --- a/cmd/collector/app/server/http_test.go +++ b/cmd/collector/app/server/http_test.go @@ -26,7 +26,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" - "go.uber.org/zap/zaptest" "github.com/jaegertracing/jaeger/cmd/collector/app/handler" "github.com/jaegertracing/jaeger/internal/metricstest" @@ -94,7 +93,6 @@ func TestSpanCollectorHTTPS(t *testing.T) { clientTLS tlscfg.Options expectError bool expectClientError bool - expectServerFail bool }{ { name: "should fail with TLS client to untrusted TLS server", @@ -109,7 +107,6 @@ func TestSpanCollectorHTTPS(t *testing.T) { }, expectError: true, expectClientError: true, - expectServerFail: false, }, { name: "should fail with TLS client to trusted TLS server with incorrect hostname", @@ -125,7 +122,6 @@ func TestSpanCollectorHTTPS(t *testing.T) { }, expectError: true, expectClientError: true, - expectServerFail: false, }, { name: "should pass with TLS client to trusted TLS server with correct hostname", @@ -139,9 +135,6 @@ func TestSpanCollectorHTTPS(t *testing.T) { CAPath: testCertKeyLocation + "/example-CA-cert.pem", ServerName: "example.com", }, - expectError: false, - expectClientError: false, - expectServerFail: false, }, { name: "should fail with TLS client without cert to trusted TLS server requiring cert", @@ -156,8 +149,6 @@ func TestSpanCollectorHTTPS(t *testing.T) { CAPath: testCertKeyLocation + "/example-CA-cert.pem", ServerName: "example.com", }, - expectError: false, - expectServerFail: false, expectClientError: true, }, { @@ -175,9 +166,6 @@ func TestSpanCollectorHTTPS(t *testing.T) { CertPath: testCertKeyLocation + "/example-client-cert.pem", KeyPath: testCertKeyLocation + "/example-client-key.pem", }, - expectError: false, - expectServerFail: false, - expectClientError: false, }, { name: "should fail with TLS client without cert to trusted TLS server requiring cert from a different CA", @@ -194,15 +182,15 @@ func TestSpanCollectorHTTPS(t *testing.T) { CertPath: testCertKeyLocation + "/example-client-cert.pem", KeyPath: testCertKeyLocation + "/example-client-key.pem", }, - expectError: false, - expectServerFail: false, expectClientError: true, }, } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - logger := zaptest.NewLogger(t) + // Cannot reliably use zaptest.NewLogger(t) because it causes race condition + // See https://github.com/jaegertracing/jaeger/issues/4497. 
+ logger := zap.NewNop() params := &HTTPServerParams{ HostPort: fmt.Sprintf(":%d", ports.CollectorHTTP), Handler: handler.NewJaegerSpanHandler(logger, &mockSpanProcessor{}), @@ -214,14 +202,12 @@ func TestSpanCollectorHTTPS(t *testing.T) { } server, err := StartHTTPServer(params) - - if test.expectServerFail { - require.Error(t, err) - } - defer server.Close() - require.NoError(t, err) - clientTLSCfg, err0 := test.clientTLS.Config(zap.NewNop()) + defer func() { + assert.NoError(t, server.Close()) + }() + + clientTLSCfg, err0 := test.clientTLS.Config(logger) require.NoError(t, err0) dialer := &net.Dialer{Timeout: 2 * time.Second} conn, clientError := tls.DialWithDialer(dialer, "tcp", "localhost:"+fmt.Sprintf("%d", ports.CollectorHTTP), clientTLSCfg) @@ -260,7 +246,7 @@ func TestSpanCollectorHTTPS(t *testing.T) { } func TestStartHTTPServerParams(t *testing.T) { - logger := zaptest.NewLogger(t) + logger := zap.NewNop() params := &HTTPServerParams{ HostPort: fmt.Sprintf(":%d", ports.CollectorHTTP), Handler: handler.NewJaegerSpanHandler(logger, &mockSpanProcessor{}), From d3f8d2809b89c6fa065f714465649f24b880ba80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Jun 2023 23:54:42 -0400 Subject: [PATCH 2/8] Bump anchore/sbom-action from 0.14.1 to 0.14.3 (#4552) Bumps [anchore/sbom-action](https://github.com/anchore/sbom-action) from 0.14.1 to 0.14.3.
Release notes (sourced from anchore/sbom-action's releases):

v0.14.3

v0.14.2

  • Update Syft to v0.80.0 (#415)
  • Make sure all invalid artifact name characters are replaced #396 (#417) [lts-po]
  • Ensure SBOM is copied to output-file (#411) [gszr]

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/ci-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-release.yml b/.github/workflows/ci-release.yml index efe8f5e4841..fff3d6d97a3 100644 --- a/.github/workflows/ci-release.yml +++ b/.github/workflows/ci-release.yml @@ -89,6 +89,6 @@ jobs: QUAY_TOKEN: ${{ secrets.QUAY_TOKEN }} - name: SBOM Generation - uses: anchore/sbom-action@422cb34a0f8b599678c41b21163ea6088edb2624 + uses: anchore/sbom-action@78fc58e266e87a38d4194b2137a3d4e9bcaf7ca1 with: artifact-name: jaeger-SBOM.spdx.json From 283bdd93cbb4a467842625d8eb320722fcb83494 Mon Sep 17 00:00:00 2001 From: Yuri Shkuro Date: Wed, 28 Jun 2023 14:26:37 -0400 Subject: [PATCH 3/8] Add readme Signed-off-by: Yuri Shkuro --- examples/memstore-plugin/README.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 examples/memstore-plugin/README.md diff --git a/examples/memstore-plugin/README.md b/examples/memstore-plugin/README.md new file mode 100644 index 00000000000..566e85c0978 --- /dev/null +++ b/examples/memstore-plugin/README.md @@ -0,0 +1,6 @@ +# memstore-plugin + +This package builds a binary that can be used as an example of a sidecar storage plugin. + +Note that Jaeger now supports remote storages via gRPC API, so using plugins is discouraged. +For example, `memorystore` can be used as a remote backend (https://github.com/jaegertracing/jaeger/issues/3835). From f282d9b4ebc02e1f9f5450ef2714dc7aa75dce3a Mon Sep 17 00:00:00 2001 From: Ivan Babrou Date: Wed, 28 Jun 2023 19:25:13 -0700 Subject: [PATCH 4/8] Rebuild jaeger-ui if the tree does not match any tag exactly (#4553) The previous behavior resulted in the prebuilt upstream package being downloaded, even if the local tree has any changes, whether they are committed or not. This broke our internal build that has some patches applied. See: * https://github.com/jaegertracing/jaeger/pull/4251#issuecomment-1608393236 Let's use a more strict approach and only use the prebuilt package if the tag is exact and does not contain any extra changes.
Signed-off-by: Ivan Babrou --- scripts/rebuild-ui.sh | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/scripts/rebuild-ui.sh b/scripts/rebuild-ui.sh index 05acafdd0cf..d395f1b3943 100644 --- a/scripts/rebuild-ui.sh +++ b/scripts/rebuild-ui.sh @@ -4,13 +4,13 @@ set -euxf -o pipefail cd jaeger-ui -LAST_TAG=$(git describe --abbrev=0 --tags 2>/dev/null) -BRANCH_HASH=$(git rev-parse HEAD) -LAST_TAG_HASH=$(git rev-parse $LAST_TAG) +LAST_TAG=$(git describe --tags --dirty 2>/dev/null) -if [[ "$BRANCH_HASH" == "$LAST_TAG_HASH" ]]; then - - if [[ "$LAST_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then +if [[ "$LAST_TAG" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + BRANCH_HASH=$(git rev-parse HEAD) + LAST_TAG_HASH=$(git rev-parse $LAST_TAG) + + if [[ "$BRANCH_HASH" == "$LAST_TAG_HASH" ]]; then temp_file=$(mktemp) trap "rm -f ${temp_file}" EXIT release_url="https://github.com/jaegertracing/jaeger-ui/releases/download/${LAST_TAG}/assets.tar.gz" @@ -20,16 +20,9 @@ if [[ "$BRANCH_HASH" == "$LAST_TAG_HASH" ]]; then rm -r -f packages/jaeger-ui/build/ tar -zxvf "$temp_file" packages/jaeger-ui/build/ exit 0 - fi fi - fi # do a regular full build yarn install --frozen-lockfile && cd packages/jaeger-ui && yarn build - - - - - From 855b22606412f3c1e0e0ffd0de406bd5cec4a827 Mon Sep 17 00:00:00 2001 From: Albert <26584478+albertteoh@users.noreply.github.com> Date: Fri, 30 Jun 2023 06:58:29 +1000 Subject: [PATCH 5/8] Support normalized metric names (#4555) ## Which problem is this PR solving? - Resolves #4547 ## Short description of the changes - Adds the following boolean parameters to declaratively determine whether the metric name should be modified to match normalization rules as defined [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/translator/prometheus/README.md): - `prometheus.query.normalize-calls` - `prometheus.query.normalize-duration` - Separate flags intentional to allow support for backwards compatibility with older OpenTelemetry Collector versions. A single flag is insufficient. - Motivated by a breaking change in: https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.80.0. --------- Signed-off-by: albertteoh --- CHANGELOG.md | 7 ++ docker-compose/monitor/Makefile | 2 +- pkg/prometheus/config/config.go | 2 + .../metrics/prometheus/metricsstore/reader.go | 15 ++- .../prometheus/metricsstore/reader_test.go | 114 +++++++++++++++--- plugin/metrics/prometheus/options.go | 18 +++ 6 files changed, 137 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49834300050..82d70976d54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,13 @@ next release (yyyy-mm-dd) #### ⛔ Breaking Changes +* [SPM] Due to a breaking change in OpenTelemetry's prometheus exporter ([details](https://github.com/open-telemetry/opentelemetry-collector-contrib/releases/tag/v0.80.0)) + metric names will no longer be normalized by default, meaning that the expected metric names would be `calls` and + `duration_*`. Backwards compatibility with older OpenTelemetry Collector versions can be achieved through the following flags: + * `prometheus.query.normalize-calls`: If true, normalizes the "calls" metric name. e.g. "calls_total". + * `prometheus.query.normalize-duration`: If true, normalizes the "duration" metric name to include the duration units. e.g. "duration_milliseconds_bucket". 
+ + #### New Features #### Bug fixes, Minor Improvements diff --git a/docker-compose/monitor/Makefile b/docker-compose/monitor/Makefile index 6b0bfa2aa99..c6948ec1f8c 100644 --- a/docker-compose/monitor/Makefile +++ b/docker-compose/monitor/Makefile @@ -18,7 +18,7 @@ run-dev: _run-connector # _run-connector is the base target to bring up the system required for SPM using the new OTEL spanmetrics connector. .PHONY: _run-connector -_run-connector: export OTEL_IMAGE_TAG = latest +_run-connector: export OTEL_IMAGE_TAG = 0.80.0 _run-connector: export OTEL_CONFIG_SRC = ./otel-collector-config-connector.yml _run-connector: export PROMETHEUS_QUERY_SUPPORT_SPANMETRICS_CONNECTOR = true _run-connector: diff --git a/pkg/prometheus/config/config.go b/pkg/prometheus/config/config.go index c3e709f535c..418f1ba0746 100644 --- a/pkg/prometheus/config/config.go +++ b/pkg/prometheus/config/config.go @@ -30,4 +30,6 @@ type Configuration struct { SupportSpanmetricsConnector bool MetricNamespace string LatencyUnit string + NormalizeCalls bool + NormalizeDuration bool } diff --git a/plugin/metrics/prometheus/metricsstore/reader.go b/plugin/metrics/prometheus/metricsstore/reader.go index 1e0f69e0858..0cf6852183d 100644 --- a/plugin/metrics/prometheus/metricsstore/reader.go +++ b/plugin/metrics/prometheus/metricsstore/reader.go @@ -141,6 +141,10 @@ func buildFullLatencyMetricName(cfg config.Configuration) string { metricName = cfg.MetricNamespace + "_" + metricName } + if !cfg.NormalizeDuration { + return metricName + } + // The long names are automatically appended to the metric name by OTEL's prometheus exporters and are defined in: // https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/translator/prometheus#metric-name shortToLongName := map[string]string{"ms": "milliseconds", "s": "seconds"} @@ -160,7 +164,7 @@ func (m MetricsReader) GetCallRates(ctx context.Context, requestParams *metricss buildPromQuery: func(p promQueryParams) string { return fmt.Sprintf( // Note: p.spanKindFilter can be ""; trailing commas are okay within a timeseries selection. - `sum(rate(%s_total{service_name =~ "%s", %s}[%s])) by (%s)`, + `sum(rate(%s{service_name =~ "%s", %s}[%s])) by (%s)`, m.callsMetricName, p.serviceFilter, p.spanKindFilter, @@ -181,7 +185,12 @@ func buildFullCallsMetricName(cfg config.Configuration) string { if cfg.MetricNamespace != "" { metricName = cfg.MetricNamespace + "_" + metricName } - return metricName + + if !cfg.NormalizeCalls { + return metricName + } + + return metricName + "_total" } // GetErrorRates gets the error rate metrics for the given set of error rate query parameters. @@ -193,7 +202,7 @@ func (m MetricsReader) GetErrorRates(ctx context.Context, requestParams *metrics buildPromQuery: func(p promQueryParams) string { return fmt.Sprintf( // Note: p.spanKindFilter can be ""; trailing commas are okay within a timeseries selection. 
- `sum(rate(%s_total{service_name =~ "%s", status_code = "STATUS_CODE_ERROR", %s}[%s])) by (%s) / sum(rate(%s_total{service_name =~ "%s", %s}[%s])) by (%s)`, + `sum(rate(%s{service_name =~ "%s", status_code = "STATUS_CODE_ERROR", %s}[%s])) by (%s) / sum(rate(%s{service_name =~ "%s", %s}[%s])) by (%s)`, m.callsMetricName, p.serviceFilter, p.spanKindFilter, p.rate, p.groupBy, m.callsMetricName, p.serviceFilter, p.spanKindFilter, p.rate, p.groupBy, ) diff --git a/plugin/metrics/prometheus/metricsstore/reader_test.go b/plugin/metrics/prometheus/metricsstore/reader_test.go index 501c664c130..cf027f8d6a0 100644 --- a/plugin/metrics/prometheus/metricsstore/reader_test.go +++ b/plugin/metrics/prometheus/metricsstore/reader_test.go @@ -178,7 +178,7 @@ func TestGetLatencies(t *testing.T) { `span_kind =~ "SPAN_KIND_SERVER|SPAN_KIND_CLIENT"}[10m])) by (service_name,le))`, }, { - name: "override the default latency metric name", + name: "enable support for spanmetrics connector with normalized metric name", serviceNames: []string{"emailservice"}, spanKinds: []string{"SPAN_KIND_SERVER"}, groupByOperation: true, @@ -193,7 +193,26 @@ func TestGetLatencies(t *testing.T) { wantLabels: map[string]string{ "service_name": "emailservice", }, - wantPromQlQuery: `histogram_quantile(0.95, sum(rate(span_metrics_duration_seconds_bucket{service_name =~ "emailservice", ` + + wantPromQlQuery: `histogram_quantile(0.95, sum(rate(span_metrics_duration_bucket{service_name =~ "emailservice", ` + + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name,le))`, + }, + { + name: "enable support for spanmetrics connector with normalized metric name", + serviceNames: []string{"emailservice"}, + spanKinds: []string{"SPAN_KIND_SERVER"}, + groupByOperation: true, + updateConfig: func(cfg config.Configuration) config.Configuration { + cfg.SupportSpanmetricsConnector = true + cfg.NormalizeDuration = true + cfg.LatencyUnit = "s" + return cfg + }, + wantName: "service_operation_latencies", + wantDescription: "0.95th quantile latency, grouped by service & operation", + wantLabels: map[string]string{ + "service_name": "emailservice", + }, + wantPromQlQuery: `histogram_quantile(0.95, sum(rate(duration_seconds_bucket{service_name =~ "emailservice", ` + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name,le))`, }, } { @@ -228,7 +247,7 @@ func TestGetCallRates(t *testing.T) { wantLabels: map[string]string{ "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(calls_total{service_name =~ "emailservice", ` + + wantPromQlQuery: `sum(rate(calls{service_name =~ "emailservice", ` + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name)`, }, { @@ -242,7 +261,7 @@ func TestGetCallRates(t *testing.T) { "operation": "/OrderResult", "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(calls_total{service_name =~ "emailservice", ` + + wantPromQlQuery: `sum(rate(calls{service_name =~ "emailservice", ` + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,operation)`, }, { @@ -255,11 +274,11 @@ func TestGetCallRates(t *testing.T) { wantLabels: map[string]string{ "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(calls_total{service_name =~ "frontend|emailservice", ` + + wantPromQlQuery: `sum(rate(calls{service_name =~ "frontend|emailservice", ` + `span_kind =~ "SPAN_KIND_SERVER|SPAN_KIND_CLIENT"}[10m])) by (service_name)`, }, { - name: "override the default call rate metric name", + name: "enable support for spanmetrics connector with a namespace", serviceNames: 
[]string{"emailservice"}, spanKinds: []string{"SPAN_KIND_SERVER"}, groupByOperation: true, @@ -273,7 +292,25 @@ func TestGetCallRates(t *testing.T) { wantLabels: map[string]string{ "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(span_metrics_calls_total{service_name =~ "emailservice", ` + + wantPromQlQuery: `sum(rate(span_metrics_calls{service_name =~ "emailservice", ` + + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name)`, + }, + { + name: "enable support for spanmetrics connector with normalized metric name", + serviceNames: []string{"emailservice"}, + spanKinds: []string{"SPAN_KIND_SERVER"}, + groupByOperation: true, + updateConfig: func(cfg config.Configuration) config.Configuration { + cfg.SupportSpanmetricsConnector = true + cfg.NormalizeCalls = true + return cfg + }, + wantName: "service_operation_call_rate", + wantDescription: "calls/sec, grouped by service & operation", + wantLabels: map[string]string{ + "service_name": "emailservice", + }, + wantPromQlQuery: `sum(rate(calls_total{service_name =~ "emailservice", ` + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name)`, }, } { @@ -307,9 +344,9 @@ func TestGetErrorRates(t *testing.T) { wantLabels: map[string]string{ "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(calls_total{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + + wantPromQlQuery: `sum(rate(calls{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name) / ` + - `sum(rate(calls_total{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name)`, + `sum(rate(calls{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name)`, }, { name: "group by service and operation should be reflected in name/description and query group-by", @@ -322,9 +359,9 @@ func TestGetErrorRates(t *testing.T) { "operation": "/OrderResult", "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(calls_total{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + + wantPromQlQuery: `sum(rate(calls{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,operation) / ` + - `sum(rate(calls_total{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,operation)`, + `sum(rate(calls{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,operation)`, }, { name: "two services and span kinds result in regex 'or' symbol in query", @@ -336,12 +373,32 @@ func TestGetErrorRates(t *testing.T) { wantLabels: map[string]string{ "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(calls_total{service_name =~ "frontend|emailservice", status_code = "STATUS_CODE_ERROR", ` + + wantPromQlQuery: `sum(rate(calls{service_name =~ "frontend|emailservice", status_code = "STATUS_CODE_ERROR", ` + `span_kind =~ "SPAN_KIND_SERVER|SPAN_KIND_CLIENT"}[10m])) by (service_name) / ` + - `sum(rate(calls_total{service_name =~ "frontend|emailservice", span_kind =~ "SPAN_KIND_SERVER|SPAN_KIND_CLIENT"}[10m])) by (service_name)`, + `sum(rate(calls{service_name =~ "frontend|emailservice", span_kind =~ "SPAN_KIND_SERVER|SPAN_KIND_CLIENT"}[10m])) by (service_name)`, }, { - name: "override the default error rate metric name", + name: "neither metric namespace nor enabling normalized metric names have an impact when spanmetrics connector is not supported", + 
serviceNames: []string{"emailservice"}, + spanKinds: []string{"SPAN_KIND_SERVER"}, + groupByOperation: false, + updateConfig: func(cfg config.Configuration) config.Configuration { + cfg.SupportSpanmetricsConnector = false + cfg.MetricNamespace = "span_metrics" + cfg.NormalizeCalls = true + return cfg + }, + wantName: "service_error_rate", + wantDescription: "error rate, computed as a fraction of errors/sec over calls/sec, grouped by service", + wantLabels: map[string]string{ + "service_name": "emailservice", + }, + wantPromQlQuery: `sum(rate(calls{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name) / ` + + `sum(rate(calls{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name)`, + }, + { + name: "enable support for spanmetrics connector with a metric namespace", serviceNames: []string{"emailservice"}, spanKinds: []string{"SPAN_KIND_SERVER"}, groupByOperation: true, @@ -355,9 +412,28 @@ func TestGetErrorRates(t *testing.T) { wantLabels: map[string]string{ "service_name": "emailservice", }, - wantPromQlQuery: `sum(rate(span_metrics_calls_total{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + + wantPromQlQuery: `sum(rate(span_metrics_calls{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name) / ` + - `sum(rate(span_metrics_calls_total{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name)`, + `sum(rate(span_metrics_calls{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name)`, + }, + { + name: "enable support for spanmetrics connector with normalized metric name", + serviceNames: []string{"emailservice"}, + spanKinds: []string{"SPAN_KIND_SERVER"}, + groupByOperation: true, + updateConfig: func(cfg config.Configuration) config.Configuration { + cfg.SupportSpanmetricsConnector = true + cfg.NormalizeCalls = true + return cfg + }, + wantName: "service_operation_error_rate", + wantDescription: "error rate, computed as a fraction of errors/sec over calls/sec, grouped by service & operation", + wantLabels: map[string]string{ + "service_name": "emailservice", + }, + wantPromQlQuery: `sum(rate(calls_total{service_name =~ "emailservice", status_code = "STATUS_CODE_ERROR", ` + + `span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name) / ` + + `sum(rate(calls_total{service_name =~ "emailservice", span_kind =~ "SPAN_KIND_SERVER"}[10m])) by (service_name,span_name)`, }, } { t.Run(tc.name, func(t *testing.T) { @@ -384,7 +460,11 @@ func TestInvalidLatencyUnit(t *testing.T) { t.Errorf("Expected a panic due to invalid latency unit") } }() - cfg := config.Configuration{SupportSpanmetricsConnector: true, LatencyUnit: "something invalid"} + cfg := config.Configuration{ + SupportSpanmetricsConnector: true, + NormalizeDuration: true, + LatencyUnit: "something invalid", + } _, _ = NewMetricsReader(zap.NewNop(), cfg) } diff --git a/plugin/metrics/prometheus/options.go b/plugin/metrics/prometheus/options.go index 1ea90e29756..ae31944230a 100644 --- a/plugin/metrics/prometheus/options.go +++ b/plugin/metrics/prometheus/options.go @@ -34,6 +34,8 @@ const ( suffixSupportSpanmetricsConnector = ".query.support-spanmetrics-connector" suffixMetricNamespace = ".query.namespace" suffixLatencyUnit = ".query.duration-unit" + suffixNormalizeCalls = ".query.normalize-calls" + suffixNormalizeDuration = 
".query.normalize-duration" defaultServerURL = "http://localhost:9090" defaultConnectTimeout = 30 * time.Second @@ -42,6 +44,8 @@ const ( defaultSupportSpanmetricsConnector = false defaultMetricNamespace = "" defaultLatencyUnit = "ms" + defaultNormalizeCalls = false + defaultNormalizeDuration = false ) type namespaceConfig struct { @@ -63,6 +67,8 @@ func NewOptions(primaryNamespace string) *Options { SupportSpanmetricsConnector: false, MetricNamespace: defaultMetricNamespace, LatencyUnit: defaultLatencyUnit, + NormalizeCalls: defaultNormalizeCalls, + NormalizeDuration: defaultNormalizeCalls, } return &Options{ @@ -93,6 +99,16 @@ func (opt *Options) AddFlags(flagSet *flag.FlagSet) { `histogram unit value set in the spanmetrics connector (see: `+ `https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/connector/spanmetricsconnector#configurations). `+ `This also helps jaeger-query determine the metric name when querying for "latency" metrics.`) + flagSet.Bool(nsConfig.namespace+suffixNormalizeCalls, defaultNormalizeCalls, + `Whether to normalize the "calls" metric name according to `+ + `https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/translator/prometheus/README.md. `+ + `For example: `+ + `"calls" (not normalized) -> "calls_total" (normalized), `) + flagSet.Bool(nsConfig.namespace+suffixNormalizeDuration, defaultNormalizeDuration, + `Whether to normalize the "duration" metric name according to `+ + `https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/pkg/translator/prometheus/README.md. `+ + `For example: `+ + `"duration_bucket" (not normalized) -> "duration_milliseconds_bucket (normalized)"`) nsConfig.getTLSFlagsConfig().AddFlags(flagSet) } @@ -107,6 +123,8 @@ func (opt *Options) InitFromViper(v *viper.Viper) error { cfg.SupportSpanmetricsConnector = v.GetBool(cfg.namespace + suffixSupportSpanmetricsConnector) cfg.MetricNamespace = v.GetString(cfg.namespace + suffixMetricNamespace) cfg.LatencyUnit = v.GetString(cfg.namespace + suffixLatencyUnit) + cfg.NormalizeCalls = v.GetBool(cfg.namespace + suffixNormalizeCalls) + cfg.NormalizeDuration = v.GetBool(cfg.namespace + suffixNormalizeDuration) isValidUnit := map[string]bool{"ms": true, "s": true} if _, ok := isValidUnit[cfg.LatencyUnit]; !ok { From 050ba81e2228e5f04d5a950bb596774fb1f18b9d Mon Sep 17 00:00:00 2001 From: Albert <26584478+albertteoh@users.noreply.github.com> Date: Sun, 2 Jul 2023 19:42:02 +1000 Subject: [PATCH 6/8] Fix OTEL logging in HotRod example (#4556) ## Short description of the changes - A cleaned up version of https://github.com/jaegertracing/jaeger/pull/4550. - It uses a single spanLogger for both OpenTracing and OpenTelemetry spans. - The logging works for both types spans because of [this clever line of code](https://github.com/opentracing/opentracing-go/blob/master/gocontext.go#L14) where `tracerWithHook` is an OTEL bridge tracer, allowing span to be stored in the `ctx` the "OTEL-way", making the span discoverable as a legitimate OTEL span such that `ok == true`: ```go if span, ok := ctx.Value(currentSpanKey).(Span); ok { ... 
} ``` ## Testing Ran locally to confirm logs appearing for both OpenTracing spans: ![Screenshot 2023-07-01 at 8 12 16 pm](https://github.com/jaegertracing/jaeger/assets/26584478/572b037a-8fbb-43aa-abdf-6a8a2a137f89) and OpenTelemetry spans: ![Screenshot 2023-07-01 at 8 12 08 pm](https://github.com/jaegertracing/jaeger/assets/26584478/681da3b3-8f07-4f85-b60e-8235592c89bd) --------- Signed-off-by: albertteoh --- examples/hotrod/pkg/log/factory.go | 15 +-- examples/hotrod/pkg/log/spanlogger.go | 143 +++++++++++++++----------- 2 files changed, 87 insertions(+), 71 deletions(-) diff --git a/examples/hotrod/pkg/log/factory.go b/examples/hotrod/pkg/log/factory.go index 32ae2d818fb..3da19424c52 100644 --- a/examples/hotrod/pkg/log/factory.go +++ b/examples/hotrod/pkg/log/factory.go @@ -18,7 +18,6 @@ package log import ( "context" - ot "github.com/opentracing/opentracing-go" "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -44,16 +43,12 @@ func (b Factory) Bg() Logger { // contains an OpenTracing span, all logging calls are also // echo-ed into the span. func (b Factory) For(ctx context.Context) Logger { - if otSpan := ot.SpanFromContext(ctx); otSpan != nil { - logger := spanLogger{span: otSpan, logger: b.logger} - - if otelSpan := trace.SpanFromContext(ctx); otelSpan != nil { - logger.spanFields = []zapcore.Field{ - zap.String("trace_id", otelSpan.SpanContext().TraceID().String()), - zap.String("span_id", otelSpan.SpanContext().SpanID().String()), - } + if otelSpan := trace.SpanFromContext(ctx); otelSpan != nil { + logger := spanLogger{span: otelSpan, logger: b.logger} + logger.spanFields = []zapcore.Field{ + zap.String("trace_id", otelSpan.SpanContext().TraceID().String()), + zap.String("span_id", otelSpan.SpanContext().SpanID().String()), } - return logger } return b.Bg() diff --git a/examples/hotrod/pkg/log/spanlogger.go b/examples/hotrod/pkg/log/spanlogger.go index dd143d19dbd..d8f64762943 100644 --- a/examples/hotrod/pkg/log/spanlogger.go +++ b/examples/hotrod/pkg/log/spanlogger.go @@ -16,23 +16,24 @@ package log import ( + "fmt" "time" - "github.com/opentracing/opentracing-go" - tag "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) type spanLogger struct { logger *zap.Logger - span opentracing.Span + span trace.Span spanFields []zapcore.Field } func (sl spanLogger) Debug(msg string, fields ...zapcore.Field) { - sl.logToSpan("Debug", msg, fields...) + sl.logToSpan("debug", msg, fields...) sl.logger.Debug(msg, append(sl.spanFields, fields...)...) } @@ -48,7 +49,7 @@ func (sl spanLogger) Error(msg string, fields ...zapcore.Field) { func (sl spanLogger) Fatal(msg string, fields ...zapcore.Field) { sl.logToSpan("fatal", msg, fields...) - tag.Error.Set(sl.span, true) + sl.span.SetStatus(codes.Error, msg) sl.logger.Fatal(msg, append(sl.spanFields, fields...)...) 
} @@ -57,99 +58,119 @@ func (sl spanLogger) With(fields ...zapcore.Field) Logger { return spanLogger{logger: sl.logger.With(fields...), span: sl.span, spanFields: sl.spanFields} } -func (sl spanLogger) logToSpan(level string, msg string, fields ...zapcore.Field) { - // TODO rather than always converting the fields, we could wrap them into a lazy logger - fa := fieldAdapter(make([]log.Field, 0, 2+len(fields))) - fa = append(fa, log.String("event", msg)) - fa = append(fa, log.String("level", level)) +func (sl spanLogger) logToSpan(level, msg string, fields ...zapcore.Field) { + fields = append(fields, zap.String("level", level)) + sl.span.AddEvent( + msg, + trace.WithAttributes(logFieldsToOTelAttrs(fields)...), + ) +} + +func logFieldsToOTelAttrs(fields []zapcore.Field) []attribute.KeyValue { + encoder := &bridgeFieldEncoder{} for _, field := range fields { - field.AddTo(&fa) + field.AddTo(encoder) } - sl.span.LogFields(fa...) + return encoder.pairs } -type fieldAdapter []log.Field +type bridgeFieldEncoder struct { + pairs []attribute.KeyValue +} -func (fa *fieldAdapter) AddBool(key string, value bool) { - *fa = append(*fa, log.Bool(key, value)) +func (e *bridgeFieldEncoder) AddArray(key string, marshaler zapcore.ArrayMarshaler) error { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(marshaler))) + return nil } -func (fa *fieldAdapter) AddFloat64(key string, value float64) { - *fa = append(*fa, log.Float64(key, value)) +func (e *bridgeFieldEncoder) AddObject(key string, marshaler zapcore.ObjectMarshaler) error { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(marshaler))) + return nil } -func (fa *fieldAdapter) AddFloat32(key string, value float32) { - *fa = append(*fa, log.Float64(key, float64(value))) +func (e *bridgeFieldEncoder) AddBinary(key string, value []byte) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(value))) } -func (fa *fieldAdapter) AddInt(key string, value int) { - *fa = append(*fa, log.Int(key, value)) +func (e *bridgeFieldEncoder) AddByteString(key string, value []byte) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(value))) } -func (fa *fieldAdapter) AddInt64(key string, value int64) { - *fa = append(*fa, log.Int64(key, value)) +func (e *bridgeFieldEncoder) AddBool(key string, value bool) { + e.pairs = append(e.pairs, attribute.Bool(key, value)) } -func (fa *fieldAdapter) AddInt32(key string, value int32) { - *fa = append(*fa, log.Int64(key, int64(value))) +func (e *bridgeFieldEncoder) AddComplex128(key string, value complex128) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(value))) } -func (fa *fieldAdapter) AddInt16(key string, value int16) { - *fa = append(*fa, log.Int64(key, int64(value))) +func (e *bridgeFieldEncoder) AddComplex64(key string, value complex64) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(value))) } -func (fa *fieldAdapter) AddInt8(key string, value int8) { - *fa = append(*fa, log.Int64(key, int64(value))) +func (e *bridgeFieldEncoder) AddDuration(key string, value time.Duration) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(value))) } -func (fa *fieldAdapter) AddUint(key string, value uint) { - *fa = append(*fa, log.Uint64(key, uint64(value))) +func (e *bridgeFieldEncoder) AddFloat64(key string, value float64) { + e.pairs = append(e.pairs, attribute.Float64(key, value)) } -func (fa *fieldAdapter) AddUint64(key string, value uint64) { - *fa = append(*fa, log.Uint64(key, value)) +func (e *bridgeFieldEncoder) AddFloat32(key string, value 
float32) { + e.pairs = append(e.pairs, attribute.Float64(key, float64(value))) } -func (fa *fieldAdapter) AddUint32(key string, value uint32) { - *fa = append(*fa, log.Uint64(key, uint64(value))) +func (e *bridgeFieldEncoder) AddInt(key string, value int) { + e.pairs = append(e.pairs, attribute.Int(key, value)) } -func (fa *fieldAdapter) AddUint16(key string, value uint16) { - *fa = append(*fa, log.Uint64(key, uint64(value))) +func (e *bridgeFieldEncoder) AddInt64(key string, value int64) { + e.pairs = append(e.pairs, attribute.Int64(key, value)) } -func (fa *fieldAdapter) AddUint8(key string, value uint8) { - *fa = append(*fa, log.Uint64(key, uint64(value))) +func (e *bridgeFieldEncoder) AddInt32(key string, value int32) { + e.pairs = append(e.pairs, attribute.Int64(key, int64(value))) } -func (fa *fieldAdapter) AddUintptr(key string, value uintptr) {} -func (fa *fieldAdapter) AddArray(key string, marshaler zapcore.ArrayMarshaler) error { return nil } -func (fa *fieldAdapter) AddComplex128(key string, value complex128) {} -func (fa *fieldAdapter) AddComplex64(key string, value complex64) {} -func (fa *fieldAdapter) AddObject(key string, value zapcore.ObjectMarshaler) error { return nil } -func (fa *fieldAdapter) AddReflected(key string, value interface{}) error { return nil } -func (fa *fieldAdapter) OpenNamespace(key string) {} +func (e *bridgeFieldEncoder) AddInt16(key string, value int16) { + e.pairs = append(e.pairs, attribute.Int64(key, int64(value))) +} -func (fa *fieldAdapter) AddDuration(key string, value time.Duration) { - // TODO inefficient - *fa = append(*fa, log.String(key, value.String())) +func (e *bridgeFieldEncoder) AddInt8(key string, value int8) { + e.pairs = append(e.pairs, attribute.Int64(key, int64(value))) } -func (fa *fieldAdapter) AddTime(key string, value time.Time) { - // TODO inefficient - *fa = append(*fa, log.String(key, value.String())) +func (e *bridgeFieldEncoder) AddString(key, value string) { + e.pairs = append(e.pairs, attribute.String(key, value)) } -func (fa *fieldAdapter) AddBinary(key string, value []byte) { - *fa = append(*fa, log.Object(key, value)) +func (e *bridgeFieldEncoder) AddTime(key string, value time.Time) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(value))) } -func (fa *fieldAdapter) AddByteString(key string, value []byte) { - *fa = append(*fa, log.Object(key, value)) +func (e *bridgeFieldEncoder) AddUint(key string, value uint) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprintf("%d", value))) } -func (fa *fieldAdapter) AddString(key, value string) { - if key != "" && value != "" { - *fa = append(*fa, log.String(key, value)) - } +func (e *bridgeFieldEncoder) AddUint64(key string, value uint64) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprintf("%d", value))) +} + +func (e *bridgeFieldEncoder) AddUint32(key string, value uint32) { + e.pairs = append(e.pairs, attribute.Int64(key, int64(value))) } + +func (e *bridgeFieldEncoder) AddUint16(key string, value uint16) { + e.pairs = append(e.pairs, attribute.Int64(key, int64(value))) +} + +func (e *bridgeFieldEncoder) AddUint8(key string, value uint8) { + e.pairs = append(e.pairs, attribute.Int64(key, int64(value))) +} + +func (e *bridgeFieldEncoder) AddUintptr(key string, value uintptr) { + e.pairs = append(e.pairs, attribute.String(key, fmt.Sprint(value))) +} + +func (e *bridgeFieldEncoder) AddReflected(key string, value interface{}) error { return nil } +func (e *bridgeFieldEncoder) OpenNamespace(key string) {} From 
321357ac239f987bea709b4b678ff2a7ecc0e6cc Mon Sep 17 00:00:00 2001 From: Afzal Ansari Date: Sun, 2 Jul 2023 20:47:29 +0530 Subject: [PATCH 7/8] [hotROD] Replace gRPC instrumentation with OTEL (#4558) --- examples/hotrod/pkg/tracing/init.go | 4 ++-- examples/hotrod/services/driver/redis.go | 3 ++- examples/hotrod/services/driver/server.go | 11 ++++------- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/examples/hotrod/pkg/tracing/init.go b/examples/hotrod/pkg/tracing/init.go index 367d328b30d..82333540273 100644 --- a/examples/hotrod/pkg/tracing/init.go +++ b/examples/hotrod/pkg/tracing/init.go @@ -45,11 +45,11 @@ import ( var once sync.Once // InitOTEL initializes OpenTelemetry SDK. -func InitOTEL(serviceName string, exporterType string, metricsFactory metrics.Factory, logger log.Factory) trace.Tracer { +func InitOTEL(serviceName string, exporterType string, metricsFactory metrics.Factory, logger log.Factory) trace.TracerProvider { _, oteltp := initBOTH(serviceName, exporterType, metricsFactory, logger) logger.Bg().Debug("Created OTEL tracer", zap.String("service-name", serviceName)) - return oteltp.Tracer(serviceName) + return oteltp } // Init returns OTel-OpenTracing Bridge. diff --git a/examples/hotrod/services/driver/redis.go b/examples/hotrod/services/driver/redis.go index 1199017dfb0..004d4c18996 100644 --- a/examples/hotrod/services/driver/redis.go +++ b/examples/hotrod/services/driver/redis.go @@ -42,8 +42,9 @@ type Redis struct { } func newRedis(otelExporter string, metricsFactory metrics.Factory, logger log.Factory) *Redis { + tp := tracing.InitOTEL("redis-manual", otelExporter, metricsFactory, logger) return &Redis{ - tracer: tracing.InitOTEL("redis-manual", otelExporter, metricsFactory, logger), + tracer: tp.Tracer("redis-manual"), logger: logger, } } diff --git a/examples/hotrod/services/driver/server.go b/examples/hotrod/services/driver/server.go index e1d1aa3975b..4c112daf718 100644 --- a/examples/hotrod/services/driver/server.go +++ b/examples/hotrod/services/driver/server.go @@ -19,8 +19,7 @@ import ( "context" "net" - otgrpc "github.com/opentracing-contrib/go-grpc" - "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.uber.org/zap" "google.golang.org/grpc" @@ -32,7 +31,6 @@ import ( // Server implements jaeger-demo-frontend service type Server struct { hostPort string - tracer opentracing.Tracer logger log.Factory redis *Redis server *grpc.Server @@ -42,14 +40,13 @@ var _ DriverServiceServer = (*Server)(nil) // NewServer creates a new driver.Server func NewServer(hostPort string, otelExporter string, metricsFactory metrics.Factory, logger log.Factory) *Server { - tracer := tracing.Init("driver", otelExporter, metricsFactory, logger) + tracerProvider := tracing.InitOTEL("driver", otelExporter, metricsFactory, logger) server := grpc.NewServer( - grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer)), - grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)), + grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor(otelgrpc.WithTracerProvider(tracerProvider))), + grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor(otelgrpc.WithTracerProvider(tracerProvider))), ) return &Server{ hostPort: hostPort, - tracer: tracer, logger: logger, server: server, redis: newRedis(otelExporter, metricsFactory, logger), From b8338390ccce7bf447ebc4ac3cb996b9750bd922 Mon Sep 17 00:00:00 2001 From: Afzal Ansari Date: Sun, 2 Jul 2023 23:49:24 +0530 Subject: [PATCH 8/8] [hotROD] Add 
OTEL instrumentation to customer svc (#4559) ## Which problem is this PR solving? Related to https://github.com/jaegertracing/jaeger/issues/3380 Part of https://github.com/jaegertracing/jaeger/issues/3381 ## Short description of the changes - This PR adds OTEL instrumentation to customer svc --------- Signed-off-by: Afzal Ansari --- examples/hotrod/services/customer/database.go | 22 ++++++++----------- examples/hotrod/services/customer/server.go | 2 +- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/examples/hotrod/services/customer/database.go b/examples/hotrod/services/customer/database.go index a89b01f1989..90960ed3196 100644 --- a/examples/hotrod/services/customer/database.go +++ b/examples/hotrod/services/customer/database.go @@ -19,8 +19,9 @@ import ( "context" "errors" - "github.com/opentracing/opentracing-go" - tags "github.com/opentracing/opentracing-go/ext" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" "go.uber.org/zap" "github.com/jaegertracing/jaeger/examples/hotrod/pkg/delay" @@ -31,13 +32,13 @@ import ( // database simulates Customer repository implemented on top of an SQL database type database struct { - tracer opentracing.Tracer + tracer trace.Tracer logger log.Factory customers map[string]*Customer lock *tracing.Mutex } -func newDatabase(tracer opentracing.Tracer, logger log.Factory) *database { +func newDatabase(tracer trace.Tracer, logger log.Factory) *database { return &database{ tracer: tracer, logger: logger, @@ -73,15 +74,10 @@ func (d *database) Get(ctx context.Context, customerID string) (*Customer, error d.logger.For(ctx).Info("Loading customer", zap.String("customer_id", customerID)) // simulate opentracing instrumentation of an SQL query - if span := opentracing.SpanFromContext(ctx); span != nil { - span := d.tracer.StartSpan("SQL SELECT", opentracing.ChildOf(span.Context())) - tags.SpanKindRPCClient.Set(span) - tags.PeerService.Set(span, "mysql") - // #nosec - span.SetTag("sql.query", "SELECT * FROM customer WHERE customer_id="+customerID) - defer span.Finish() - ctx = opentracing.ContextWithSpan(ctx, span) - } + ctx, span := d.tracer.Start(ctx, "SQL SELECT", trace.WithSpanKind(trace.SpanKindClient)) + // #nosec + span.SetAttributes(semconv.PeerServiceKey.String("mysql"), attribute.Key("sql.query").String("SELECT * FROM customer WHERE customer_id=" + customerID)) + defer span.End() if !config.MySQLMutexDisabled { // simulate misconfigured connection pool that only gives one connection at a time diff --git a/examples/hotrod/services/customer/server.go b/examples/hotrod/services/customer/server.go index 7cbe0e6da4c..5a4336960e6 100644 --- a/examples/hotrod/services/customer/server.go +++ b/examples/hotrod/services/customer/server.go @@ -43,7 +43,7 @@ func NewServer(hostPort string, otelExporter string, metricsFactory metrics.Fact tracer: tracing.Init("customer", otelExporter, metricsFactory, logger), logger: logger, database: newDatabase( - tracing.Init("mysql", otelExporter, metricsFactory, logger), + tracing.InitOTEL("mysql", otelExporter, metricsFactory, logger).Tracer("mysql"), logger.With(zap.String("component", "mysql")), ), }
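
For readers less familiar with the OpenTelemetry API that the HotRod patches above migrate to, here is a minimal, self-contained sketch of the client-span pattern used in `database.Get` in the final patch. It is illustrative only and not part of the patch series: the `getCustomer` function, its query string, and the use of the global `otel.Tracer` are assumptions made for the example; the span calls themselves (`tracer.Start` with `trace.WithSpanKind`, `span.SetAttributes` with `semconv.PeerServiceKey`, deferred `span.End`) mirror the ones introduced in the diff.

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
	"go.opentelemetry.io/otel/trace"
)

// getCustomer illustrates the client-span pattern: start a span directly from
// an OTEL tracer, tag it with semantic-convention and custom attributes, and
// always end it when the operation returns.
func getCustomer(ctx context.Context, tracer trace.Tracer, customerID string) error {
	// Start a child span of whatever span is already stored in ctx.
	ctx, span := tracer.Start(ctx, "SQL SELECT", trace.WithSpanKind(trace.SpanKindClient))
	defer span.End()

	span.SetAttributes(
		semconv.PeerServiceKey.String("mysql"),
		attribute.String("sql.query", "SELECT * FROM customer WHERE customer_id="+customerID),
	)

	// The query itself is omitted; downstream calls should receive ctx so
	// they join the same trace.
	_ = ctx
	return nil
}

func main() {
	// otel.Tracer returns a tracer from the globally registered provider
	// (a no-op provider unless one was installed, e.g. by tracing.InitOTEL).
	tracer := otel.Tracer("example")
	if err := getCustomer(context.Background(), tracer, "123"); err != nil {
		fmt.Println(err)
	}
}
```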