diff --git a/.github/ISSUE_TEMPLATE/version_release.md b/.github/ISSUE_TEMPLATE/version_release.md index f0a7c0a975a..c321e0eab5b 100644 --- a/.github/ISSUE_TEMPLATE/version_release.md +++ b/.github/ISSUE_TEMPLATE/version_release.md @@ -11,7 +11,7 @@ assignees: '' - [ ] Complete [Milestone](https://github.com/open-telemetry/opentelemetry-go/milestone/) -- [ ] Update contrib codebase to support changes about to be released (use a git sha version) +- [ ] [Update contrib codebase to support changes about to be released (use a git sha version)](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#verify-changes-for-contrib-repository) - [ ] [Pre-release](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#pre-release) - [ ] [Tag](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#tag) - [ ] [Release](https://github.com/open-telemetry/opentelemetry-go/blob/main/RELEASING.md#release) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2458ae0ec01..0389b6e867b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,12 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230) - The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272) - Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286) +- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305) +- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315) + +### Fixed + +- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311) ## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24 diff --git a/CODEOWNERS b/CODEOWNERS index 31d336d9222..88f4c7d0e09 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole \ No newline at end of file +CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7847b459088..6aed3bd9d1d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -629,7 +629,6 @@ should be canceled. - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Damien Mathieu](https://github.com/dmathieu), Elastic -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Maintainers @@ -643,6 +642,7 @@ should be canceled. 
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS ### Become an Approver or a Maintainer diff --git a/RELEASING.md b/RELEASING.md index d2691d0bd8b..940f57f3d87 100644 --- a/RELEASING.md +++ b/RELEASING.md @@ -27,6 +27,12 @@ You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). +## Verify changes for contrib repository + +If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository. + +Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes. + ## Pre-Release First, decide which module sets will be released and update their versions diff --git a/bridge/opentracing/test/go.mod b/bridge/opentracing/test/go.mod index efe7c8ecb9a..19b678b699f 100644 --- a/bridge/opentracing/test/go.mod +++ b/bridge/opentracing/test/go.mod @@ -28,7 +28,7 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 // indirect google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/bridge/opentracing/test/go.sum b/bridge/opentracing/test/go.sum index 75834b64720..9871852673d 100644 --- a/bridge/opentracing/test/go.sum +++ b/bridge/opentracing/test/go.sum @@ -49,8 +49,8 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= diff --git a/example/otel-collector/docker-compose.yaml b/example/otel-collector/docker-compose.yaml index 02481bff4b6..fdaf11f243b 100644 --- a/example/otel-collector/docker-compose.yaml +++ b/example/otel-collector/docker-compose.yaml @@ -3,7 +3,7 @@ services: otel-collector: - image: otel/opentelemetry-collector-contrib:0.91.0 + image: otel/opentelemetry-collector-contrib:0.100.0 command: ["--config=/etc/otel-collector.yaml"] volumes: - ./otel-collector.yaml:/etc/otel-collector.yaml @@ -11,13 +11,13 @@ 
services: - 4317:4317 prometheus: - image: prom/prometheus:v2.45.2 + image: prom/prometheus:v2.52.0 volumes: - ./prometheus.yaml:/etc/prometheus/prometheus.yml ports: - 9090:9090 jaeger: - image: jaegertracing/all-in-one:1.52 + image: jaegertracing/all-in-one:1.57 ports: - 16686:16686 diff --git a/example/otel-collector/go.mod b/example/otel-collector/go.mod index 9961d6db6c0..e0aedaebd67 100644 --- a/example/otel-collector/go.mod +++ b/example/otel-collector/go.mod @@ -26,8 +26,8 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 // indirect google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/example/otel-collector/go.sum b/example/otel-collector/go.sum index 3f6d5147e36..17532150808 100644 --- a/example/otel-collector/go.sum +++ b/example/otel-collector/go.sum @@ -25,10 +25,10 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 h1:OpXbo8JnN8+jZGPrL4SSfaDjSCjupr8lXyBAbexEm/U= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/example/prometheus/go.mod b/example/prometheus/go.mod index 223a72d5f0f..511910155ea 100644 --- a/example/prometheus/go.mod +++ b/example/prometheus/go.mod @@ -3,7 +3,7 @@ module go.opentelemetry.io/otel/example/prometheus go 1.21 require ( - github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_golang v1.19.1 go.opentelemetry.io/otel v1.26.0 go.opentelemetry.io/otel/exporters/prometheus v0.48.0 go.opentelemetry.io/otel/metric v1.26.0 diff --git a/example/prometheus/go.sum b/example/prometheus/go.sum index 330335ff379..ca7607bebaf 100644 --- a/example/prometheus/go.sum +++ b/example/prometheus/go.sum @@ -13,8 +13,8 @@ github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= diff --git a/exporters/otlp/otlplog/otlploggrpc/config.go b/exporters/otlp/otlplog/otlploggrpc/config.go index fbae0bd57fc..25635aabdaa 100644 --- a/exporters/otlp/otlplog/otlploggrpc/config.go +++ b/exporters/otlp/otlplog/otlploggrpc/config.go @@ -8,6 +8,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" ) // Option applies an option to the Exporter. @@ -32,9 +34,7 @@ func newConfig(options []Option) config { // // This configuration does not define any network retry strategy. That is // entirely handled by the gRPC ClientConn. -type RetryConfig struct { - // TODO: implement. -} +type RetryConfig retry.Config // WithInsecure disables client transport security for the Exporter's gRPC // connection, just like grpc.WithInsecure() diff --git a/exporters/otlp/otlplog/otlploggrpc/go.mod b/exporters/otlp/otlplog/otlploggrpc/go.mod index ad270a5f3b4..53b1a366ec9 100644 --- a/exporters/otlp/otlplog/otlploggrpc/go.mod +++ b/exporters/otlp/otlplog/otlploggrpc/go.mod @@ -3,6 +3,7 @@ module go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc go 1.21 require ( + github.com/cenkalti/backoff/v4 v4.3.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/otel/sdk/log v0.2.0-alpha google.golang.org/grpc v1.63.2 @@ -21,7 +22,7 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 // indirect google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlplog/otlploggrpc/go.sum b/exporters/otlp/otlplog/otlploggrpc/go.sum index 18aa7ac6897..bb818d1168e 100644 --- a/exporters/otlp/otlplog/otlploggrpc/go.sum +++ b/exporters/otlp/otlplog/otlploggrpc/go.sum @@ -1,3 +1,5 @@ +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -17,8 +19,8 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/exporters/otlp/otlplog/otlploggrpc/internal/gen.go b/exporters/otlp/otlplog/otlploggrpc/internal/gen.go new file mode 100644 index 00000000000..83a07d38c75 --- /dev/null +++ b/exporters/otlp/otlplog/otlploggrpc/internal/gen.go @@ -0,0 +1,7 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal" + +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go +//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go diff --git a/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go b/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go new file mode 100644 index 00000000000..7e59d510607 --- /dev/null +++ b/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry.go @@ -0,0 +1,145 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/otlp/retry/retry.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Package retry provides request retry functionality that can perform +// configurable exponential backoff for transient errors and honor any +// explicit throttle responses received. +package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc/internal/retry" + +import ( + "context" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" +) + +// DefaultConfig are the recommended defaults to use. +var DefaultConfig = Config{ + Enabled: true, + InitialInterval: 5 * time.Second, + MaxInterval: 30 * time.Second, + MaxElapsedTime: time.Minute, +} + +// Config defines configuration for retrying batches in case of export failure +// using an exponential backoff. +type Config struct { + // Enabled indicates whether to not retry sending batches in case of + // export failure. + Enabled bool + // InitialInterval the time to wait after the first failure before + // retrying. + InitialInterval time.Duration + // MaxInterval is the upper bound on backoff interval. Once this value is + // reached the delay between consecutive retries will always be + // `MaxInterval`. + MaxInterval time.Duration + // MaxElapsedTime is the maximum amount of time (including retries) spent + // trying to send a request/batch. Once this value is reached, the data + // is discarded. + MaxElapsedTime time.Duration +} + +// RequestFunc wraps a request with retry logic. 
+type RequestFunc func(context.Context, func(context.Context) error) error + +// EvaluateFunc returns if an error is retry-able and if an explicit throttle +// duration should be honored that was included in the error. +// +// The function must return true if the error argument is retry-able, +// otherwise it must return false for the first return parameter. +// +// The function must return a non-zero time.Duration if the error contains +// explicit throttle duration that should be honored, otherwise it must return +// a zero valued time.Duration. +type EvaluateFunc func(error) (bool, time.Duration) + +// RequestFunc returns a RequestFunc using the evaluate function to determine +// if requests can be retried and based on the exponential backoff +// configuration of c. +func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { + if !c.Enabled { + return func(ctx context.Context, fn func(context.Context) error) error { + return fn(ctx) + } + } + + return func(ctx context.Context, fn func(context.Context) error) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here + // must call Reset after changing the InitialInterval (this saves an + // unnecessary call to Now). + b := &backoff.ExponentialBackOff{ + InitialInterval: c.InitialInterval, + RandomizationFactor: backoff.DefaultRandomizationFactor, + Multiplier: backoff.DefaultMultiplier, + MaxInterval: c.MaxInterval, + MaxElapsedTime: c.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + b.Reset() + + for { + err := fn(ctx) + if err == nil { + return nil + } + + retryable, throttle := evaluate(err) + if !retryable { + return err + } + + bOff := b.NextBackOff() + if bOff == backoff.Stop { + return fmt.Errorf("max retry time elapsed: %w", err) + } + + // Wait for the greater of the backoff or throttle delay. + var delay time.Duration + if bOff > throttle { + delay = bOff + } else { + elapsed := b.GetElapsedTime() + if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime { + return fmt.Errorf("max retry time would elapse: %w", err) + } + delay = throttle + } + + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) + } + } + } +} + +// Allow override for testing. +var waitFunc = wait + +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. +func wait(ctx context.Context, delay time.Duration) error { + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + // Handle the case where the timer and context deadline end + // simultaneously by prioritizing the timer expiration nil value + // response. + select { + case <-timer.C: + default: + return ctx.Err() + } + case <-timer.C: + } + + return nil +} diff --git a/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry_test.go b/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry_test.go new file mode 100644 index 00000000000..b48dde62359 --- /dev/null +++ b/exporters/otlp/otlplog/otlploggrpc/internal/retry/retry_test.go @@ -0,0 +1,250 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/otlp/retry/retry_test.go.tmpl + +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package retry + +import ( + "context" + "errors" + "math" + "sync" + "testing" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/stretchr/testify/assert" +) + +func TestWait(t *testing.T) { + tests := []struct { + ctx context.Context + delay time.Duration + expected error + }{ + { + ctx: context.Background(), + delay: time.Duration(0), + }, + { + ctx: context.Background(), + delay: time.Duration(1), + }, + { + ctx: context.Background(), + delay: time.Duration(-1), + }, + { + ctx: func() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + return ctx + }(), + // Ensure the timer and context do not end simultaneously. + delay: 1 * time.Hour, + expected: context.Canceled, + }, + } + + for _, test := range tests { + err := wait(test.ctx, test.delay) + if test.expected == nil { + assert.NoError(t, err) + } else { + assert.ErrorIs(t, err, test.expected) + } + } +} + +func TestNonRetryableError(t *testing.T) { + ev := func(error) (bool, time.Duration) { return false, 0 } + + reqFunc := Config{ + Enabled: true, + InitialInterval: 1 * time.Nanosecond, + MaxInterval: 1 * time.Nanosecond, + // Never stop retrying. + MaxElapsedTime: 0, + }.RequestFunc(ev) + ctx := context.Background() + assert.NoError(t, reqFunc(ctx, func(context.Context) error { + return nil + })) + assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { + return assert.AnError + }), assert.AnError) +} + +func TestThrottledRetry(t *testing.T) { + // Ensure the throttle delay is used by making longer than backoff delay. + throttleDelay, backoffDelay := time.Second, time.Nanosecond + + ev := func(error) (bool, time.Duration) { + // Retry everything with a throttle delay. + return true, throttleDelay + } + + reqFunc := Config{ + Enabled: true, + InitialInterval: backoffDelay, + MaxInterval: backoffDelay, + // Never stop retrying. + MaxElapsedTime: 0, + }.RequestFunc(ev) + + origWait := waitFunc + var done bool + waitFunc = func(_ context.Context, delay time.Duration) error { + assert.Equal(t, throttleDelay, delay, "retry not throttled") + // Try twice to ensure call is attempted again after delay. + if done { + return assert.AnError + } + done = true + return nil + } + defer func() { waitFunc = origWait }() + + ctx := context.Background() + assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { + return errors.New("not this error") + }), assert.AnError) +} + +func TestBackoffRetry(t *testing.T) { + ev := func(error) (bool, time.Duration) { return true, 0 } + + delay := time.Nanosecond + reqFunc := Config{ + Enabled: true, + InitialInterval: delay, + MaxInterval: delay, + // Never stop retrying. + MaxElapsedTime: 0, + }.RequestFunc(ev) + + origWait := waitFunc + var done bool + waitFunc = func(_ context.Context, d time.Duration) error { + delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor) + assert.InDelta(t, delay, d, delta, "retry not backoffed") + // Try twice to ensure call is attempted again after delay. 
+ if done { + return assert.AnError + } + done = true + return nil + } + t.Cleanup(func() { waitFunc = origWait }) + + ctx := context.Background() + assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { + return errors.New("not this error") + }), assert.AnError) +} + +func TestBackoffRetryCanceledContext(t *testing.T) { + ev := func(error) (bool, time.Duration) { return true, 0 } + + delay := time.Millisecond + reqFunc := Config{ + Enabled: true, + InitialInterval: delay, + MaxInterval: delay, + // Never stop retrying. + MaxElapsedTime: 10 * time.Millisecond, + }.RequestFunc(ev) + + ctx, cancel := context.WithCancel(context.Background()) + count := 0 + cancel() + err := reqFunc(ctx, func(context.Context) error { + count++ + return assert.AnError + }) + + assert.ErrorIs(t, err, context.Canceled) + assert.Contains(t, err.Error(), assert.AnError.Error()) + assert.Equal(t, 1, count) +} + +func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) { + // Ensure the throttle delay is used by making longer than backoff delay. + tDelay, bDelay := time.Hour, time.Nanosecond + ev := func(error) (bool, time.Duration) { return true, tDelay } + reqFunc := Config{ + Enabled: true, + InitialInterval: bDelay, + MaxInterval: bDelay, + MaxElapsedTime: tDelay - (time.Nanosecond), + }.RequestFunc(ev) + + ctx := context.Background() + assert.Contains(t, reqFunc(ctx, func(context.Context) error { + return assert.AnError + }).Error(), "max retry time would elapse: ") +} + +func TestMaxElapsedTime(t *testing.T) { + ev := func(error) (bool, time.Duration) { return true, 0 } + delay := time.Nanosecond + reqFunc := Config{ + Enabled: true, + // InitialInterval > MaxElapsedTime means immediate return. + InitialInterval: 2 * delay, + MaxElapsedTime: delay, + }.RequestFunc(ev) + + ctx := context.Background() + assert.Contains(t, reqFunc(ctx, func(context.Context) error { + return assert.AnError + }).Error(), "max retry time elapsed: ") +} + +func TestRetryNotEnabled(t *testing.T) { + ev := func(error) (bool, time.Duration) { + t.Error("evaluated retry when not enabled") + return false, 0 + } + + reqFunc := Config{}.RequestFunc(ev) + ctx := context.Background() + assert.NoError(t, reqFunc(ctx, func(context.Context) error { + return nil + })) + assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error { + return assert.AnError + }), assert.AnError) +} + +func TestRetryConcurrentSafe(t *testing.T) { + ev := func(error) (bool, time.Duration) { return true, 0 } + reqFunc := Config{ + Enabled: true, + }.RequestFunc(ev) + + var wg sync.WaitGroup + ctx := context.Background() + + for i := 1; i < 5; i++ { + wg.Add(1) + + go func() { + defer wg.Done() + + var done bool + assert.NoError(t, reqFunc(ctx, func(context.Context) error { + if !done { + done = true + return assert.AnError + } + + return nil + })) + }() + } + + wg.Wait() +} diff --git a/exporters/otlp/otlplog/otlploghttp/go.mod b/exporters/otlp/otlplog/otlploghttp/go.mod index 91321a5530d..9ea8c83d2b8 100644 --- a/exporters/otlp/otlplog/otlploghttp/go.mod +++ b/exporters/otlp/otlplog/otlploghttp/go.mod @@ -27,8 +27,8 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 // indirect + google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240509183442-62759503f434 // indirect google.golang.org/grpc v1.63.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlplog/otlploghttp/go.sum b/exporters/otlp/otlplog/otlploghttp/go.sum index 5b9b3682f38..cc4098d8d06 100644 --- a/exporters/otlp/otlplog/otlploghttp/go.sum +++ b/exporters/otlp/otlplog/otlploghttp/go.sum @@ -30,10 +30,10 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 h1:OpXbo8JnN8+jZGPrL4SSfaDjSCjupr8lXyBAbexEm/U= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod index 77f7d981289..260fd10917b 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod @@ -12,7 +12,7 @@ require ( go.opentelemetry.io/otel/sdk v1.26.0 go.opentelemetry.io/otel/sdk/metric v1.26.0 go.opentelemetry.io/proto/otlp v1.2.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 google.golang.org/grpc v1.63.2 google.golang.org/protobuf v1.34.1 ) @@ -30,7 +30,7 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum index 5b9b3682f38..cc4098d8d06 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum @@ -30,10 +30,10 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= 
-google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 h1:OpXbo8JnN8+jZGPrL4SSfaDjSCjupr8lXyBAbexEm/U= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod index 24122beb4b3..b296fb034db 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod @@ -29,8 +29,8 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum index 5b9b3682f38..cc4098d8d06 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum @@ -30,10 +30,10 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 h1:OpXbo8JnN8+jZGPrL4SSfaDjSCjupr8lXyBAbexEm/U= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 
h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.mod b/exporters/otlp/otlptrace/otlptracegrpc/go.mod index e3c4dca5574..1d5da69d368 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.mod +++ b/exporters/otlp/otlptrace/otlptracegrpc/go.mod @@ -11,7 +11,7 @@ require ( go.opentelemetry.io/otel/trace v1.26.0 go.opentelemetry.io/proto/otlp v1.2.0 go.uber.org/goleak v1.3.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 google.golang.org/grpc v1.63.2 google.golang.org/protobuf v1.34.1 ) @@ -27,7 +27,7 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.sum b/exporters/otlp/otlptrace/otlptracegrpc/go.sum index 278644dc017..70138571260 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/go.sum +++ b/exporters/otlp/otlptrace/otlptracegrpc/go.sum @@ -32,10 +32,10 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 h1:OpXbo8JnN8+jZGPrL4SSfaDjSCjupr8lXyBAbexEm/U= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go index e3f7f431fe0..8f84a799632 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go +++ 
b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go @@ -256,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { // Generic Options +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint @@ -263,6 +266,8 @@ func WithEndpoint(endpoint string) GenericOption { }) } +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". func WithEndpointURL(v string) GenericOption { return newGenericOption(func(cfg Config) Config { u, err := url.Parse(v) diff --git a/exporters/otlp/otlptrace/otlptracegrpc/options.go b/exporters/otlp/otlptrace/otlptracegrpc/options.go index a9e7f933b34..bbad0e6d01e 100644 --- a/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -53,9 +53,11 @@ func WithInsecure() Option { return wrappedOption{otlpconfig.WithInsecure()} } -// WithEndpoint sets the target endpoint the Exporter will connect to. +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. The provided endpoint should resemble "example.com:4317" (no +// scheme or path). // -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // will take precedence. @@ -71,9 +73,11 @@ func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } -// WithEndpointURL sets the target endpoint URL the Exporter will connect to. +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) +// the Exporter will connect to. The provided endpoint URL should resemble +// "https://example.com:4318/v1/traces". // -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // will take precedence. @@ -84,7 +88,7 @@ func WithEndpoint(endpoint string) Option { // If an invalid URL is provided, the default value will be kept. // // By default, if an environment variable is not set, and this option is not -// passed, "localhost:4317" will be used. +// passed, "https://localhost:4317/v1/traces" will be used. // // This option has no effect if WithGRPCConn is used. 
func WithEndpointURL(u string) Option { diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.mod b/exporters/otlp/otlptrace/otlptracehttp/go.mod index ef993e89c46..79ffc258629 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.mod +++ b/exporters/otlp/otlptrace/otlptracehttp/go.mod @@ -25,8 +25,8 @@ require ( golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.sum b/exporters/otlp/otlptrace/otlptracehttp/go.sum index 5b9b3682f38..cc4098d8d06 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/go.sum +++ b/exporters/otlp/otlptrace/otlptracehttp/go.sum @@ -30,10 +30,10 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae h1:c55+MER4zkBS14uJhSZMGGmya0yJx5iHV4x/fpOSNRk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434 h1:OpXbo8JnN8+jZGPrL4SSfaDjSCjupr8lXyBAbexEm/U= +google.golang.org/genproto/googleapis/api v0.0.0-20240509183442-62759503f434/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434 h1:umK/Ey0QEzurTNlsV3R+MfxHAb78HCEX/IkuR+zH4WQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240509183442-62759503f434/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go index 45fefc4dd8c..2ebbc752f4b 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go +++ b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go @@ -256,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { // Generic Options +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. 
func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint @@ -263,6 +266,8 @@ func WithEndpoint(endpoint string) GenericOption { }) } +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". func WithEndpointURL(v string) GenericOption { return newGenericOption(func(cfg Config) Config { u, err := url.Parse(v) diff --git a/exporters/otlp/otlptrace/otlptracehttp/options.go b/exporters/otlp/otlptrace/otlptracehttp/options.go index 4b2758e4e51..6497f3ccdd0 100644 --- a/exporters/otlp/otlptrace/otlptracehttp/options.go +++ b/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -56,12 +56,15 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config return w.ApplyHTTPOption(cfg) } -// WithEndpoint sets the target endpoint the Exporter will connect to. +// WithEndpoint sets the target endpoint (host and port) the Exporter will +// connect to. The provided endpoint should resemble "example.com:4318" (no +// scheme or path). // -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT -// will take precedence. +// will take precedence. Note, both environment variables include the full +// scheme and path, while WithEndpoint sets only the host and port. // // If both this option and WithEndpointURL are used, the last used option will // take precedence. @@ -74,9 +77,10 @@ func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} } -// WithEndpointURL sets the target endpoint URL the Exporter will connect to. +// WithEndpointURL sets the target endpoint URL (scheme, host, port, path) the +// Exporter will connect to. // -// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // environment variable is set, and this option is not passed, that variable // value will be used. If both are set, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT // will take precedence. 
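
For readers following the option-documentation changes above: a minimal usage sketch (not part of this patch) of the two endpoint options, shown with the `otlptracehttp` exporter and placeholder collector addresses. The grpc exporter's options behave analogously with port 4317.

```go
// Sketch only: illustrates the documented difference between WithEndpoint
// (host and port only) and WithEndpointURL (full URL with scheme and path).
// The endpoints below are placeholders, not defaults from this patch.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()

	// WithEndpoint takes only "host:port"; no scheme or path.
	expA, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("collector.example.com:4318"),
		otlptracehttp.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = expA.Shutdown(ctx) }()

	// WithEndpointURL takes the full URL, including scheme and path.
	expB, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpointURL("https://collector.example.com:4318/v1/traces"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = expB.Shutdown(ctx) }()
}
```
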
diff --git a/exporters/prometheus/go.mod b/exporters/prometheus/go.mod index a2cbd8db276..e0b31e42585 100644 --- a/exporters/prometheus/go.mod +++ b/exporters/prometheus/go.mod @@ -3,7 +3,7 @@ module go.opentelemetry.io/otel/exporters/prometheus go 1.21 require ( - github.com/prometheus/client_golang v1.19.0 + github.com/prometheus/client_golang v1.19.1 github.com/prometheus/client_model v0.6.1 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/otel v1.26.0 diff --git a/exporters/prometheus/go.sum b/exporters/prometheus/go.sum index 75a25778ff5..078353040f3 100644 --- a/exporters/prometheus/go.sum +++ b/exporters/prometheus/go.sum @@ -18,8 +18,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= -github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= diff --git a/exporters/stdout/stdoutlog/exporter_test.go b/exporters/stdout/stdoutlog/exporter_test.go index 606fba40b3f..659f7f462d7 100644 --- a/exporters/stdout/stdoutlog/exporter_test.go +++ b/exporters/stdout/stdoutlog/exporter_test.go @@ -183,7 +183,7 @@ func getJSON(now *time.Time) string { timestamps = "\"Timestamp\":" + string(serializedNow) + ",\"ObservedTimestamp\":" + string(serializedNow) + "," } - return "{" + timestamps + "\"Severity\":9,\"SeverityText\":\"INFO\",\"Body\":{},\"Attributes\":[{\"Key\":\"key\",\"Value\":{}},{\"Key\":\"key2\",\"Value\":{}},{\"Key\":\"key3\",\"Value\":{}},{\"Key\":\"key4\",\"Value\":{}},{\"Key\":\"key5\",\"Value\":{}},{\"Key\":\"bool\",\"Value\":{}}],\"TraceID\":\"0102030405060708090a0b0c0d0e0f10\",\"SpanID\":\"0102030405060708\",\"TraceFlags\":\"01\",\"Resource\":[{\"Key\":\"foo\",\"Value\":{\"Type\":\"STRING\",\"Value\":\"bar\"}}],\"Scope\":{\"Name\":\"name\",\"Version\":\"version\",\"SchemaURL\":\"https://example.com/custom-schema\"},\"DroppedAttributes\":10}\n" + return "{" + timestamps + "\"Severity\":9,\"SeverityText\":\"INFO\",\"Body\":{\"Type\":\"String\",\"Value\":\"test\"},\"Attributes\":[{\"Key\":\"key\",\"Value\":{\"Type\":\"String\",\"Value\":\"value\"}},{\"Key\":\"key2\",\"Value\":{\"Type\":\"String\",\"Value\":\"value\"}},{\"Key\":\"key3\",\"Value\":{\"Type\":\"String\",\"Value\":\"value\"}},{\"Key\":\"key4\",\"Value\":{\"Type\":\"String\",\"Value\":\"value\"}},{\"Key\":\"key5\",\"Value\":{\"Type\":\"String\",\"Value\":\"value\"}},{\"Key\":\"bool\",\"Value\":{\"Type\":\"Bool\",\"Value\":true}}],\"TraceID\":\"0102030405060708090a0b0c0d0e0f10\",\"SpanID\":\"0102030405060708\",\"TraceFlags\":\"01\",\"Resource\":[{\"Key\":\"foo\",\"Value\":{\"Type\":\"STRING\",\"Value\":\"bar\"}}],\"Scope\":{\"Name\":\"name\",\"Version\":\"version\",\"SchemaURL\":\"https://example.com/custom-schema\"},\"DroppedAttributes\":10}\n" } 
func getJSONs(now *time.Time) string { @@ -200,31 +200,52 @@ func getPrettyJSON(now *time.Time) string { return `{` + timestamps + ` "Severity": 9, "SeverityText": "INFO", - "Body": {}, + "Body": { + "Type": "String", + "Value": "test" + }, "Attributes": [ { "Key": "key", - "Value": {} + "Value": { + "Type": "String", + "Value": "value" + } }, { "Key": "key2", - "Value": {} + "Value": { + "Type": "String", + "Value": "value" + } }, { "Key": "key3", - "Value": {} + "Value": { + "Type": "String", + "Value": "value" + } }, { "Key": "key4", - "Value": {} + "Value": { + "Type": "String", + "Value": "value" + } }, { "Key": "key5", - "Value": {} + "Value": { + "Type": "String", + "Value": "value" + } }, { "Key": "bool", - "Value": {} + "Value": { + "Type": "Bool", + "Value": true + } } ], "TraceID": "0102030405060708090a0b0c0d0e0f10", @@ -344,3 +365,84 @@ func TestExporterConcurrentSafe(t *testing.T) { }) } } + +func TestValueMarshalJSON(t *testing.T) { + testCases := []struct { + value log.Value + want string + }{ + { + value: log.Empty("test").Value, + want: `{"Type":"Empty","Value":null}`, + }, + { + value: log.BoolValue(true), + want: `{"Type":"Bool","Value":true}`, + }, + { + value: log.Float64Value(3.14), + want: `{"Type":"Float64","Value":3.14}`, + }, + { + value: log.Int64Value(42), + want: `{"Type":"Int64","Value":42}`, + }, + { + value: log.StringValue("hello"), + want: `{"Type":"String","Value":"hello"}`, + }, + { + value: log.BytesValue([]byte{1, 2, 3}), + // The base64 encoding of []byte{1, 2, 3} is "AQID". + want: `{"Type":"Bytes","Value":"AQID"}`, + }, + { + value: log.SliceValue( + log.Empty("empty").Value, + log.BoolValue(true), + log.Float64Value(2.2), + log.IntValue(3), + log.StringValue("4"), + log.BytesValue([]byte{5}), + log.SliceValue( + log.IntValue(6), + log.MapValue( + log.Int("seven", 7), + ), + ), + log.MapValue( + log.Int("nine", 9), + ), + ), + want: `{"Type":"Slice","Value":[{"Type":"Empty","Value":null},{"Type":"Bool","Value":true},{"Type":"Float64","Value":2.2},{"Type":"Int64","Value":3},{"Type":"String","Value":"4"},{"Type":"Bytes","Value":"BQ=="},{"Type":"Slice","Value":[{"Type":"Int64","Value":6},{"Type":"Map","Value":[{"Key":"seven","Value":{"Type":"Int64","Value":7}}]}]},{"Type":"Map","Value":[{"Key":"nine","Value":{"Type":"Int64","Value":9}}]}]}`, + }, + { + value: log.MapValue( + log.Empty("empty"), + log.Bool("one", true), + log.Float64("two", 2.2), + log.Int("three", 3), + log.String("four", "4"), + log.Bytes("five", []byte{5}), + log.Slice("six", + log.IntValue(6), + log.MapValue( + log.Int("seven", 7), + ), + ), + log.Map("eight", + log.Int("nine", 9), + ), + ), + want: `{"Type":"Map","Value":[{"Key":"empty","Value":{"Type":"Empty","Value":null}},{"Key":"one","Value":{"Type":"Bool","Value":true}},{"Key":"two","Value":{"Type":"Float64","Value":2.2}},{"Key":"three","Value":{"Type":"Int64","Value":3}},{"Key":"four","Value":{"Type":"String","Value":"4"}},{"Key":"five","Value":{"Type":"Bytes","Value":"BQ=="}},{"Key":"six","Value":{"Type":"Slice","Value":[{"Type":"Int64","Value":6},{"Type":"Map","Value":[{"Key":"seven","Value":{"Type":"Int64","Value":7}}]}]}},{"Key":"eight","Value":{"Type":"Map","Value":[{"Key":"nine","Value":{"Type":"Int64","Value":9}}]}}]}`, + }, + } + + for _, tc := range testCases { + t.Run(tc.value.String(), func(t *testing.T) { + got, err := json.Marshal(value{Value: tc.value}) + require.NoError(t, err) + assert.JSONEq(t, tc.want, string(got)) + }) + } +} diff --git a/exporters/stdout/stdoutlog/record.go 
b/exporters/stdout/stdoutlog/record.go index 31a511dc15a..71512e23f93 100644 --- a/exporters/stdout/stdoutlog/record.go +++ b/exporters/stdout/stdoutlog/record.go @@ -4,6 +4,8 @@ package stdoutlog // import "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" import ( + "encoding/json" + "errors" "time" "go.opentelemetry.io/otel/log" @@ -13,14 +15,74 @@ import ( "go.opentelemetry.io/otel/trace" ) +func newValue(v log.Value) value { + return value{Value: v} +} + +type value struct { + log.Value +} + +// MarshalJSON implements a custom marshal function to encode log.Value. +func (v value) MarshalJSON() ([]byte, error) { + var jsonVal struct { + Type string + Value interface{} + } + jsonVal.Type = v.Kind().String() + + switch v.Kind() { + case log.KindString: + jsonVal.Value = v.AsString() + case log.KindInt64: + jsonVal.Value = v.AsInt64() + case log.KindFloat64: + jsonVal.Value = v.AsFloat64() + case log.KindBool: + jsonVal.Value = v.AsBool() + case log.KindBytes: + jsonVal.Value = v.AsBytes() + case log.KindMap: + m := v.AsMap() + values := make([]keyValue, 0, len(m)) + for _, kv := range m { + values = append(values, keyValue{ + Key: kv.Key, + Value: newValue(kv.Value), + }) + } + + jsonVal.Value = values + case log.KindSlice: + s := v.AsSlice() + values := make([]value, 0, len(s)) + for _, e := range s { + values = append(values, newValue(e)) + } + + jsonVal.Value = values + case log.KindEmpty: + jsonVal.Value = nil + default: + return nil, errors.New("invalid Kind") + } + + return json.Marshal(jsonVal) +} + +type keyValue struct { + Key string + Value value +} + // recordJSON is a JSON-serializable representation of a Record. type recordJSON struct { Timestamp *time.Time `json:",omitempty"` ObservedTimestamp *time.Time `json:",omitempty"` Severity log.Severity SeverityText string - Body log.Value - Attributes []log.KeyValue + Body value + Attributes []keyValue TraceID trace.TraceID SpanID trace.SpanID TraceFlags trace.TraceFlags @@ -34,13 +96,13 @@ func (e *Exporter) newRecordJSON(r sdklog.Record) recordJSON { newRecord := recordJSON{ Severity: r.Severity(), SeverityText: r.SeverityText(), - Body: r.Body(), + Body: newValue(r.Body()), TraceID: r.TraceID(), SpanID: r.SpanID(), TraceFlags: r.TraceFlags(), - Attributes: make([]log.KeyValue, 0, r.AttributesLen()), + Attributes: make([]keyValue, 0, r.AttributesLen()), Resource: &res, Scope: r.InstrumentationScope(), @@ -49,7 +111,10 @@ func (e *Exporter) newRecordJSON(r sdklog.Record) recordJSON { } r.WalkAttributes(func(kv log.KeyValue) bool { - newRecord.Attributes = append(newRecord.Attributes, kv) + newRecord.Attributes = append(newRecord.Attributes, keyValue{ + Key: kv.Key, + Value: newValue(kv.Value), + }) return true }) diff --git a/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl b/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl index 0a64afe3ce6..a867b707e89 100644 --- a/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl +++ b/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl @@ -256,6 +256,9 @@ func NewGRPCOption(fn func(cfg Config) Config) GRPCOption { // Generic Options +// WithEndpoint configures the trace host and port only; endpoint should +// resemble "example.com" or "localhost:4317". To configure the scheme and path, +// use WithEndpointURL. 
func WithEndpoint(endpoint string) GenericOption { return newGenericOption(func(cfg Config) Config { cfg.Traces.Endpoint = endpoint @@ -263,6 +266,8 @@ func WithEndpoint(endpoint string) GenericOption { }) } +// WithEndpointURL configures the trace scheme, host, port, and path; the +// provided value should resemble "https://example.com:4318/v1/traces". func WithEndpointURL(v string) GenericOption { return newGenericOption(func(cfg Config) Config { u, err := url.Parse(v) diff --git a/sdk/metric/instrument_test.go b/sdk/metric/instrument_test.go index 712fddc4558..60066f425aa 100644 --- a/sdk/metric/instrument_test.go +++ b/sdk/metric/instrument_test.go @@ -25,7 +25,7 @@ func BenchmarkInstrument(b *testing.B) { build := aggregate.Builder[int64]{} var meas []aggregate.Measure[int64] - in, _ := build.LastValue() + in, _ := build.PrecomputedLastValue() meas = append(meas, in) build.Temporality = metricdata.CumulativeTemporality @@ -50,7 +50,7 @@ func BenchmarkInstrument(b *testing.B) { build := aggregate.Builder[int64]{} var meas []aggregate.Measure[int64] - in, _ := build.LastValue() + in, _ := build.PrecomputedLastValue() meas = append(meas, in) build.Temporality = metricdata.CumulativeTemporality diff --git a/sdk/metric/internal/aggregate/aggregate.go b/sdk/metric/internal/aggregate/aggregate.go index 0a97444a4be..c9976de6c78 100644 --- a/sdk/metric/internal/aggregate/aggregate.go +++ b/sdk/metric/internal/aggregate/aggregate.go @@ -74,21 +74,26 @@ func (b Builder[N]) filter(f fltrMeasure[N]) Measure[N] { } // LastValue returns a last-value aggregate function input and output. -// -// The Builder.Temporality is ignored and delta is use always. func (b Builder[N]) LastValue() (Measure[N], ComputeAggregation) { - // Delta temporality is the only temporality that makes semantic sense for - // a last-value aggregate. lv := newLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative + } +} - return b.filter(lv.measure), func(dest *metricdata.Aggregation) int { - // Ignore if dest is not a metricdata.Gauge. The chance for memory - // reuse of the DataPoints is missed (better luck next time). - gData, _ := (*dest).(metricdata.Gauge[N]) - lv.computeAggregation(&gData.DataPoints) - *dest = gData - - return len(gData.DataPoints) +// PrecomputedLastValue returns a last-value aggregate function input and +// output. The aggregation returned from the returned ComputeAggregation +// function will always only return values from the previous collection cycle. +func (b Builder[N]) PrecomputedLastValue() (Measure[N], ComputeAggregation) { + lv := newPrecomputedLastValue[N](b.AggregationLimit, b.resFunc()) + switch b.Temporality { + case metricdata.DeltaTemporality: + return b.filter(lv.measure), lv.delta + default: + return b.filter(lv.measure), lv.cumulative } } diff --git a/sdk/metric/internal/aggregate/aggregate_test.go b/sdk/metric/internal/aggregate/aggregate_test.go index 1afb8a99258..37c310a60e3 100644 --- a/sdk/metric/internal/aggregate/aggregate_test.go +++ b/sdk/metric/internal/aggregate/aggregate_test.go @@ -6,6 +6,7 @@ package aggregate // import "go.opentelemetry.io/otel/sdk/metric/internal/aggreg import ( "context" "strconv" + "sync/atomic" "testing" "time" @@ -39,16 +40,39 @@ var ( fltrBob = attribute.NewSet(userBob) // Sat Jan 01 2000 00:00:00 GMT+0000. 
- staticTime = time.Unix(946684800, 0) - staticNowFunc = func() time.Time { return staticTime } - // Pass to t.Cleanup to override the now function with staticNowFunc and - // revert once the test completes. E.g. t.Cleanup(mockTime(now)). - mockTime = func(orig func() time.Time) (cleanup func()) { - now = staticNowFunc - return func() { now = orig } - } + y2k = time.Unix(946684800, 0) ) +// y2kPlus returns the timestamp at n seconds past Sat Jan 01 2000 00:00:00 GMT+0000. +func y2kPlus(n int64) time.Time { + d := time.Duration(n) * time.Second + return y2k.Add(d) +} + +// clock is a test clock. It provides a predictable value for now() that can be +// reset. +type clock struct { + ticks atomic.Int64 +} + +// Now returns the mocked time starting at y2kPlus(0). Each call to Now will +// increment the returned value by one second. +func (c *clock) Now() time.Time { + old := c.ticks.Add(1) - 1 + return y2kPlus(old) +} + +// Reset resets the clock c to tick from y2kPlus(0). +func (c *clock) Reset() { c.ticks.Store(0) } + +// Register registers clock c's Now method as the now var. It returns an +// unregister func that should be called to restore the original now value. +func (c *clock) Register() (unregister func()) { + orig := now + now = c.Now + return func() { now = orig } +} + func dropExemplars[N int64 | float64]() exemplar.Reservoir { return exemplar.Drop() } diff --git a/sdk/metric/internal/aggregate/exponential_histogram_test.go b/sdk/metric/internal/aggregate/exponential_histogram_test.go index bea3f771615..2ffd3ebf0bf 100644 --- a/sdk/metric/internal/aggregate/exponential_histogram_test.go +++ b/sdk/metric/internal/aggregate/exponential_histogram_test.go @@ -727,11 +727,18 @@ func TestSubNormal(t *testing.T) { } func TestExponentialHistogramAggregation(t *testing.T) { - t.Cleanup(mockTime(now)) + c := new(clock) + t.Cleanup(c.Register()) t.Run("Int64/Delta", testDeltaExpoHist[int64]()) + c.Reset() + t.Run("Float64/Delta", testDeltaExpoHist[float64]()) + c.Reset() + t.Run("Int64/Cumulative", testCumulativeExpoHist[int64]()) + c.Reset() + t.Run("Float64/Cumulative", testCumulativeExpoHist[float64]()) } @@ -770,8 +777,8 @@ func testDeltaExpoHist[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(1), + Time: y2kPlus(9), Count: 7, Min: metricdata.NewExtrema[N](-1), Max: metricdata.NewExtrema[N](16), @@ -825,8 +832,8 @@ func testDeltaExpoHist[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(10), + Time: y2kPlus(24), Count: 7, Min: metricdata.NewExtrema[N](-1), Max: metricdata.NewExtrema[N](16), @@ -843,8 +850,8 @@ func testDeltaExpoHist[N int64 | float64]() func(t *testing.T) { }, { Attributes: overflowSet, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(10), + Time: y2kPlus(24), Count: 6, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](16), @@ -897,8 +904,8 @@ func testCumulativeExpoHist[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(9), Count: 7, Min: metricdata.NewExtrema[N](-1), Max: metricdata.NewExtrema[N](16), @@ -930,8 +937,8 @@ func testCumulativeExpoHist[N int64 | float64]() func(t *testing.T) { DataPoints: 
[]metricdata.ExponentialHistogramDataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(13), Count: 10, Min: metricdata.NewExtrema[N](-1), Max: metricdata.NewExtrema[N](16), @@ -959,8 +966,8 @@ func testCumulativeExpoHist[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(14), Count: 10, Min: metricdata.NewExtrema[N](-1), Max: metricdata.NewExtrema[N](16), @@ -996,8 +1003,8 @@ func testCumulativeExpoHist[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.ExponentialHistogramDataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(21), Count: 10, Min: metricdata.NewExtrema[N](-1), Max: metricdata.NewExtrema[N](16), @@ -1014,8 +1021,8 @@ func testCumulativeExpoHist[N int64 | float64]() func(t *testing.T) { }, { Attributes: overflowSet, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(21), Count: 6, Min: metricdata.NewExtrema[N](1), Max: metricdata.NewExtrema[N](16), diff --git a/sdk/metric/internal/aggregate/histogram_test.go b/sdk/metric/internal/aggregate/histogram_test.go index aeedc55d91b..38ba1229eb2 100644 --- a/sdk/metric/internal/aggregate/histogram_test.go +++ b/sdk/metric/internal/aggregate/histogram_test.go @@ -7,6 +7,7 @@ import ( "context" "sort" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,22 +23,30 @@ var ( ) func TestHistogram(t *testing.T) { - t.Cleanup(mockTime(now)) + c := new(clock) + t.Cleanup(c.Register()) t.Run("Int64/Delta/Sum", testDeltaHist[int64](conf[int64]{hPt: hPointSummed[int64]})) + c.Reset() t.Run("Int64/Delta/NoSum", testDeltaHist[int64](conf[int64]{noSum: true, hPt: hPoint[int64]})) + c.Reset() t.Run("Float64/Delta/Sum", testDeltaHist[float64](conf[float64]{hPt: hPointSummed[float64]})) + c.Reset() t.Run("Float64/Delta/NoSum", testDeltaHist[float64](conf[float64]{noSum: true, hPt: hPoint[float64]})) + c.Reset() t.Run("Int64/Cumulative/Sum", testCumulativeHist[int64](conf[int64]{hPt: hPointSummed[int64]})) + c.Reset() t.Run("Int64/Cumulative/NoSum", testCumulativeHist[int64](conf[int64]{noSum: true, hPt: hPoint[int64]})) + c.Reset() t.Run("Float64/Cumulative/Sum", testCumulativeHist[float64](conf[float64]{hPt: hPointSummed[float64]})) + c.Reset() t.Run("Float64/Cumulative/NoSum", testCumulativeHist[float64](conf[float64]{noSum: true, hPt: hPoint[float64]})) } type conf[N int64 | float64] struct { noSum bool - hPt func(attribute.Set, N, uint64) metricdata.HistogramDataPoint[N] + hPt func(attribute.Set, N, uint64, time.Time, time.Time) metricdata.HistogramDataPoint[N] } func testDeltaHist[N int64 | float64](c conf[N]) func(t *testing.T) { @@ -71,8 +80,8 @@ func testDeltaHist[N int64 | float64](c conf[N]) func(t *testing.T) { agg: metricdata.Histogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ - c.hPt(fltrAlice, 2, 3), - c.hPt(fltrBob, 10, 2), + c.hPt(fltrAlice, 2, 3, y2kPlus(1), y2kPlus(7)), + c.hPt(fltrBob, 10, 2, y2kPlus(1), y2kPlus(7)), }, }, }, @@ -87,8 +96,8 @@ func testDeltaHist[N int64 | float64](c conf[N]) func(t *testing.T) { agg: metricdata.Histogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ - c.hPt(fltrAlice, 10, 1), - c.hPt(fltrBob, 3, 1), + 
c.hPt(fltrAlice, 10, 1, y2kPlus(7), y2kPlus(10)), + c.hPt(fltrBob, 3, 1, y2kPlus(7), y2kPlus(10)), }, }, }, @@ -117,9 +126,9 @@ func testDeltaHist[N int64 | float64](c conf[N]) func(t *testing.T) { agg: metricdata.Histogram[N]{ Temporality: metricdata.DeltaTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ - c.hPt(fltrAlice, 1, 1), - c.hPt(fltrBob, 1, 1), - c.hPt(overflowSet, 1, 2), + c.hPt(fltrAlice, 1, 1, y2kPlus(11), y2kPlus(16)), + c.hPt(fltrBob, 1, 1, y2kPlus(11), y2kPlus(16)), + c.hPt(overflowSet, 1, 2, y2kPlus(11), y2kPlus(16)), }, }, }, @@ -158,8 +167,8 @@ func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ - c.hPt(fltrAlice, 2, 3), - c.hPt(fltrBob, 10, 2), + c.hPt(fltrAlice, 2, 3, y2kPlus(0), y2kPlus(7)), + c.hPt(fltrBob, 10, 2, y2kPlus(0), y2kPlus(7)), }, }, }, @@ -174,8 +183,8 @@ func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ - c.hPt(fltrAlice, 2, 4), - c.hPt(fltrBob, 10, 3), + c.hPt(fltrAlice, 2, 4, y2kPlus(0), y2kPlus(10)), + c.hPt(fltrBob, 10, 3, y2kPlus(0), y2kPlus(10)), }, }, }, @@ -187,8 +196,8 @@ func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ - c.hPt(fltrAlice, 2, 4), - c.hPt(fltrBob, 10, 3), + c.hPt(fltrAlice, 2, 4, y2kPlus(0), y2kPlus(11)), + c.hPt(fltrBob, 10, 3, y2kPlus(0), y2kPlus(11)), }, }, }, @@ -204,9 +213,9 @@ func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { agg: metricdata.Histogram[N]{ Temporality: metricdata.CumulativeTemporality, DataPoints: []metricdata.HistogramDataPoint[N]{ - c.hPt(fltrAlice, 2, 4), - c.hPt(fltrBob, 10, 3), - c.hPt(overflowSet, 1, 2), + c.hPt(fltrAlice, 2, 4, y2kPlus(0), y2kPlus(14)), + c.hPt(fltrBob, 10, 3, y2kPlus(0), y2kPlus(14)), + c.hPt(overflowSet, 1, 2, y2kPlus(0), y2kPlus(14)), }, }, }, @@ -216,14 +225,14 @@ func testCumulativeHist[N int64 | float64](c conf[N]) func(t *testing.T) { // hPointSummed returns an HistogramDataPoint that started and ended now with // multi number of measurements values v. It includes a min and max (set to v). -func hPointSummed[N int64 | float64](a attribute.Set, v N, multi uint64) metricdata.HistogramDataPoint[N] { +func hPointSummed[N int64 | float64](a attribute.Set, v N, multi uint64, start, t time.Time) metricdata.HistogramDataPoint[N] { idx := sort.SearchFloat64s(bounds, float64(v)) counts := make([]uint64, len(bounds)+1) counts[idx] += multi return metricdata.HistogramDataPoint[N]{ Attributes: a, - StartTime: now(), - Time: now(), + StartTime: start, + Time: t, Count: multi, Bounds: bounds, BucketCounts: counts, @@ -235,14 +244,14 @@ func hPointSummed[N int64 | float64](a attribute.Set, v N, multi uint64) metricd // hPoint returns an HistogramDataPoint that started and ended now with multi // number of measurements values v. It includes a min and max (set to v). 
-func hPoint[N int64 | float64](a attribute.Set, v N, multi uint64) metricdata.HistogramDataPoint[N] { +func hPoint[N int64 | float64](a attribute.Set, v N, multi uint64, start, t time.Time) metricdata.HistogramDataPoint[N] { idx := sort.SearchFloat64s(bounds, float64(v)) counts := make([]uint64, len(bounds)+1) counts[idx] += multi return metricdata.HistogramDataPoint[N]{ Attributes: a, - StartTime: now(), - Time: now(), + StartTime: start, + Time: t, Count: multi, Bounds: bounds, BucketCounts: counts, @@ -334,7 +343,9 @@ func TestCumulativeHistogramImutableCounts(t *testing.T) { } func TestDeltaHistogramReset(t *testing.T) { - t.Cleanup(mockTime(now)) + orig := now + now = func() time.Time { return y2k } + t.Cleanup(func() { now = orig }) h := newHistogram[int64](bounds, noMinMax, false, 0, dropExemplars[int64]) @@ -345,7 +356,7 @@ func TestDeltaHistogramReset(t *testing.T) { h.measure(context.Background(), 1, alice, nil) expect := metricdata.Histogram[int64]{Temporality: metricdata.DeltaTemporality} - expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](alice, 1, 1)} + expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](alice, 1, 1, now(), now())} h.delta(&data) metricdatatest.AssertAggregationsEqual(t, expect, data) @@ -356,7 +367,7 @@ func TestDeltaHistogramReset(t *testing.T) { // Aggregating another set should not affect the original (alice). h.measure(context.Background(), 1, bob, nil) - expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](bob, 1, 1)} + expect.DataPoints = []metricdata.HistogramDataPoint[int64]{hPointSummed[int64](bob, 1, 1, now(), now())} h.delta(&data) metricdatatest.AssertAggregationsEqual(t, expect, data) } diff --git a/sdk/metric/internal/aggregate/lastvalue.go b/sdk/metric/internal/aggregate/lastvalue.go index f3238974c6a..8f406dd2bcb 100644 --- a/sdk/metric/internal/aggregate/lastvalue.go +++ b/sdk/metric/internal/aggregate/lastvalue.go @@ -26,6 +26,7 @@ func newLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *la newRes: r, limit: newLimiter[datapoint[N]](limit), values: make(map[attribute.Distinct]datapoint[N]), + start: now(), } } @@ -36,6 +37,7 @@ type lastValue[N int64 | float64] struct { newRes func() exemplar.Reservoir limit limiter[datapoint[N]] values map[attribute.Distinct]datapoint[N] + start time.Time } func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute.Set, droppedAttr []attribute.KeyValue) { @@ -58,23 +60,103 @@ func (s *lastValue[N]) measure(ctx context.Context, value N, fltrAttr attribute. s.values[attr.Equivalent()] = d } -func (s *lastValue[N]) computeAggregation(dest *[]metricdata.DataPoint[N]) { +func (s *lastValue[N]) delta(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints) + // Do not report stale values. + clear(s.values) + // Update start time for delta temporality. + s.start = now() + + *dest = gData + + return n +} + +func (s *lastValue[N]) cumulative(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). 
+ gData, _ := (*dest).(metricdata.Gauge[N]) + s.Lock() defer s.Unlock() + n := s.copyDpts(&gData.DataPoints) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + *dest = gData + + return n +} + +// copyDpts copies the datapoints held by s into dest. The number of datapoints +// copied is returned. +func (s *lastValue[N]) copyDpts(dest *[]metricdata.DataPoint[N]) int { n := len(s.values) *dest = reset(*dest, n, n) var i int for _, v := range s.values { (*dest)[i].Attributes = v.attrs - // The event time is the only meaningful timestamp, StartTime is - // ignored. + (*dest)[i].StartTime = s.start (*dest)[i].Time = v.timestamp (*dest)[i].Value = v.value collectExemplars(&(*dest)[i].Exemplars, v.res.Collect) i++ } + return n +} + +// newPrecomputedLastValue returns an aggregator that summarizes a set of +// observations as the last one made. +func newPrecomputedLastValue[N int64 | float64](limit int, r func() exemplar.Reservoir) *precomputedLastValue[N] { + return &precomputedLastValue[N]{lastValue: newLastValue[N](limit, r)} +} + +// precomputedLastValue summarizes a set of observations as the last one made. +type precomputedLastValue[N int64 | float64] struct { + *lastValue[N] +} + +func (s *precomputedLastValue[N]) delta(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints) // Do not report stale values. clear(s.values) + // Update start time for delta temporality. + s.start = now() + + *dest = gData + + return n +} + +func (s *precomputedLastValue[N]) cumulative(dest *metricdata.Aggregation) int { + // Ignore if dest is not a metricdata.Gauge. The chance for memory reuse of + // the DataPoints is missed (better luck next time). + gData, _ := (*dest).(metricdata.Gauge[N]) + + s.Lock() + defer s.Unlock() + + n := s.copyDpts(&gData.DataPoints) + // Do not report stale values. 
+ clear(s.values) + *dest = gData + + return n } diff --git a/sdk/metric/internal/aggregate/lastvalue_test.go b/sdk/metric/internal/aggregate/lastvalue_test.go index 66ef3e785ab..8504e3b192e 100644 --- a/sdk/metric/internal/aggregate/lastvalue_test.go +++ b/sdk/metric/internal/aggregate/lastvalue_test.go @@ -11,14 +11,32 @@ import ( ) func TestLastValue(t *testing.T) { - t.Cleanup(mockTime(now)) + c := new(clock) + t.Cleanup(c.Register()) - t.Run("Int64", testLastValue[int64]()) - t.Run("Float64", testLastValue[float64]()) + t.Run("Int64/DeltaLastValue", testDeltaLastValue[int64]()) + c.Reset() + t.Run("Float64/DeltaLastValue", testDeltaLastValue[float64]()) + c.Reset() + + t.Run("Int64/CumulativeLastValue", testCumulativeLastValue[int64]()) + c.Reset() + t.Run("Float64/CumulativeLastValue", testCumulativeLastValue[float64]()) + c.Reset() + + t.Run("Int64/DeltaPrecomputedLastValue", testDeltaPrecomputedLastValue[int64]()) + c.Reset() + t.Run("Float64/DeltaPrecomputedLastValue", testDeltaPrecomputedLastValue[float64]()) + c.Reset() + + t.Run("Int64/CumulativePrecomputedLastValue", testCumulativePrecomputedLastValue[int64]()) + c.Reset() + t.Run("Float64/CumulativePrecomputedLastValue", testCumulativePrecomputedLastValue[float64]()) } -func testLastValue[N int64 | float64]() func(*testing.T) { +func testDeltaLastValue[N int64 | float64]() func(*testing.T) { in, out := Builder[N]{ + Temporality: metricdata.DeltaTemporality, Filter: attrFltr, AggregationLimit: 3, }.LastValue() @@ -42,12 +60,344 @@ func testLastValue[N int64 | float64]() func(*testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - Time: staticTime, + StartTime: y2kPlus(1), + Time: y2kPlus(5), + Value: 2, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(1), + Time: y2kPlus(6), + Value: -10, + }, + }, + }, + }, + }, { + // Everything resets, do not report old measurements. + input: []arg[N]{}, + expect: output{n: 0, agg: metricdata.Gauge[N]{}}, + }, { + input: []arg[N]{ + {ctx, 10, alice}, + {ctx, 3, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(8), + Time: y2kPlus(9), + Value: 10, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(8), + Time: y2kPlus(10), + Value: 3, + }, + }, + }, + }, + }, { + input: []arg[N]{ + {ctx, 1, alice}, + {ctx, 1, bob}, + // These will exceed cardinality limit. + {ctx, 1, carol}, + {ctx, 1, dave}, + }, + expect: output{ + n: 3, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(11), + Time: y2kPlus(12), + Value: 1, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(11), + Time: y2kPlus(13), + Value: 1, + }, + { + Attributes: overflowSet, + StartTime: y2kPlus(11), + Time: y2kPlus(15), + Value: 1, + }, + }, + }, + }, + }, + }) +} + +func testCumulativeLastValue[N int64 | float64]() func(*testing.T) { + in, out := Builder[N]{ + Temporality: metricdata.CumulativeTemporality, + Filter: attrFltr, + AggregationLimit: 3, + }.LastValue() + ctx := context.Background() + return test[N](in, out, []teststep[N]{ + { + // Empty output if nothing is measured. 
+ input: []arg[N]{}, + expect: output{n: 0, agg: metricdata.Gauge[N]{}}, + }, { + input: []arg[N]{ + {ctx, 1, alice}, + {ctx, -1, bob}, + {ctx, 1, fltrAlice}, + {ctx, 2, alice}, + {ctx, -10, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(0), + Time: y2kPlus(4), + Value: 2, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(0), + Time: y2kPlus(5), + Value: -10, + }, + }, + }, + }, + }, { + // Cumulative temporality means no resets. + input: []arg[N]{}, + expect: output{ + n: 2, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(0), + Time: y2kPlus(4), + Value: 2, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(0), + Time: y2kPlus(5), + Value: -10, + }, + }, + }, + }, + }, { + input: []arg[N]{ + {ctx, 10, alice}, + {ctx, 3, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(0), + Time: y2kPlus(6), + Value: 10, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(0), + Time: y2kPlus(7), + Value: 3, + }, + }, + }, + }, + }, { + input: []arg[N]{ + {ctx, 1, alice}, + {ctx, 1, bob}, + // These will exceed cardinality limit. + {ctx, 1, carol}, + {ctx, 1, dave}, + }, + expect: output{ + n: 3, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(0), + Time: y2kPlus(8), + Value: 1, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(0), + Time: y2kPlus(9), + Value: 1, + }, + { + Attributes: overflowSet, + StartTime: y2kPlus(0), + Time: y2kPlus(11), + Value: 1, + }, + }, + }, + }, + }, + }) +} + +func testDeltaPrecomputedLastValue[N int64 | float64]() func(*testing.T) { + in, out := Builder[N]{ + Temporality: metricdata.DeltaTemporality, + Filter: attrFltr, + AggregationLimit: 3, + }.PrecomputedLastValue() + ctx := context.Background() + return test[N](in, out, []teststep[N]{ + { + // Empty output if nothing is measured. + input: []arg[N]{}, + expect: output{n: 0, agg: metricdata.Gauge[N]{}}, + }, { + input: []arg[N]{ + {ctx, 1, alice}, + {ctx, -1, bob}, + {ctx, 1, fltrAlice}, + {ctx, 2, alice}, + {ctx, -10, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(1), + Time: y2kPlus(5), + Value: 2, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(1), + Time: y2kPlus(6), + Value: -10, + }, + }, + }, + }, + }, { + // Everything resets, do not report old measurements. + input: []arg[N]{}, + expect: output{n: 0, agg: metricdata.Gauge[N]{}}, + }, { + input: []arg[N]{ + {ctx, 10, alice}, + {ctx, 3, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(8), + Time: y2kPlus(9), + Value: 10, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(8), + Time: y2kPlus(10), + Value: 3, + }, + }, + }, + }, + }, { + input: []arg[N]{ + {ctx, 1, alice}, + {ctx, 1, bob}, + // These will exceed cardinality limit. 
+ {ctx, 1, carol}, + {ctx, 1, dave}, + }, + expect: output{ + n: 3, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(11), + Time: y2kPlus(12), + Value: 1, + }, + { + Attributes: fltrBob, + StartTime: y2kPlus(11), + Time: y2kPlus(13), + Value: 1, + }, + { + Attributes: overflowSet, + StartTime: y2kPlus(11), + Time: y2kPlus(15), + Value: 1, + }, + }, + }, + }, + }, + }) +} + +func testCumulativePrecomputedLastValue[N int64 | float64]() func(*testing.T) { + in, out := Builder[N]{ + Temporality: metricdata.CumulativeTemporality, + Filter: attrFltr, + AggregationLimit: 3, + }.PrecomputedLastValue() + ctx := context.Background() + return test[N](in, out, []teststep[N]{ + { + // Empty output if nothing is measured. + input: []arg[N]{}, + expect: output{n: 0, agg: metricdata.Gauge[N]{}}, + }, { + input: []arg[N]{ + {ctx, 1, alice}, + {ctx, -1, bob}, + {ctx, 1, fltrAlice}, + {ctx, 2, alice}, + {ctx, -10, bob}, + }, + expect: output{ + n: 2, + agg: metricdata.Gauge[N]{ + DataPoints: []metricdata.DataPoint[N]{ + { + Attributes: fltrAlice, + StartTime: y2kPlus(0), + Time: y2kPlus(4), Value: 2, }, { Attributes: fltrBob, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(5), Value: -10, }, }, @@ -68,12 +418,14 @@ func testLastValue[N int64 | float64]() func(*testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(6), Value: 10, }, { Attributes: fltrBob, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(7), Value: 3, }, }, @@ -93,17 +445,20 @@ func testLastValue[N int64 | float64]() func(*testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(8), Value: 1, }, { Attributes: fltrBob, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(9), Value: 1, }, { Attributes: overflowSet, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(11), Value: 1, }, }, @@ -114,6 +469,6 @@ func testLastValue[N int64 | float64]() func(*testing.T) { } func BenchmarkLastValue(b *testing.B) { - b.Run("Int64", benchmarkAggregate(Builder[int64]{}.LastValue)) - b.Run("Float64", benchmarkAggregate(Builder[float64]{}.LastValue)) + b.Run("Int64", benchmarkAggregate(Builder[int64]{}.PrecomputedLastValue)) + b.Run("Float64", benchmarkAggregate(Builder[float64]{}.PrecomputedLastValue)) } diff --git a/sdk/metric/internal/aggregate/sum_test.go b/sdk/metric/internal/aggregate/sum_test.go index c7bb47e650d..c20adaed500 100644 --- a/sdk/metric/internal/aggregate/sum_test.go +++ b/sdk/metric/internal/aggregate/sum_test.go @@ -11,18 +11,30 @@ import ( ) func TestSum(t *testing.T) { - t.Cleanup(mockTime(now)) + c := new(clock) + t.Cleanup(c.Register()) t.Run("Int64/DeltaSum", testDeltaSum[int64]()) + c.Reset() + t.Run("Float64/DeltaSum", testDeltaSum[float64]()) + c.Reset() t.Run("Int64/CumulativeSum", testCumulativeSum[int64]()) + c.Reset() + t.Run("Float64/CumulativeSum", testCumulativeSum[float64]()) + c.Reset() t.Run("Int64/DeltaPrecomputedSum", testDeltaPrecomputedSum[int64]()) + c.Reset() + t.Run("Float64/DeltaPrecomputedSum", testDeltaPrecomputedSum[float64]()) + c.Reset() t.Run("Int64/CumulativePrecomputedSum", testCumulativePrecomputedSum[int64]()) + c.Reset() + t.Run("Float64/CumulativePrecomputedSum", testCumulativePrecomputedSum[float64]()) } @@ -62,14 +74,14 @@ func testDeltaSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { 
Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(1), + Time: y2kPlus(7), Value: 4, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(1), + Time: y2kPlus(7), Value: -11, }, }, @@ -89,14 +101,14 @@ func testDeltaSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(7), + Time: y2kPlus(10), Value: 10, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(7), + Time: y2kPlus(10), Value: 3, }, }, @@ -131,20 +143,20 @@ func testDeltaSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(11), + Time: y2kPlus(16), Value: 1, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(11), + Time: y2kPlus(16), Value: 1, }, { Attributes: overflowSet, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(11), + Time: y2kPlus(16), Value: 2, }, }, @@ -190,14 +202,14 @@ func testCumulativeSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(7), Value: 4, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(7), Value: -11, }, }, @@ -217,14 +229,14 @@ func testCumulativeSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(10), Value: 14, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(10), Value: -8, }, }, @@ -245,20 +257,20 @@ func testCumulativeSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(13), Value: 14, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(13), Value: -8, }, { Attributes: overflowSet, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(13), Value: 2, }, }, @@ -304,14 +316,14 @@ func testDeltaPrecomputedSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(1), + Time: y2kPlus(7), Value: 4, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(1), + Time: y2kPlus(7), Value: -11, }, }, @@ -332,14 +344,14 @@ func testDeltaPrecomputedSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(7), + Time: y2kPlus(11), Value: 7, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(7), + Time: y2kPlus(11), Value: 14, }, }, @@ -374,20 +386,20 @@ func testDeltaPrecomputedSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(12), + Time: y2kPlus(17), Value: 1, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, 
+ StartTime: y2kPlus(12), + Time: y2kPlus(17), Value: 1, }, { Attributes: overflowSet, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(12), + Time: y2kPlus(17), Value: 2, }, }, @@ -433,14 +445,14 @@ func testCumulativePrecomputedSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(7), Value: 4, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(7), Value: -11, }, }, @@ -461,14 +473,14 @@ func testCumulativePrecomputedSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(11), Value: 11, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(11), Value: 3, }, }, @@ -503,20 +515,20 @@ func testCumulativePrecomputedSum[N int64 | float64]() func(t *testing.T) { DataPoints: []metricdata.DataPoint[N]{ { Attributes: fltrAlice, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(17), Value: 1, }, { Attributes: fltrBob, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(17), Value: 1, }, { Attributes: overflowSet, - StartTime: staticTime, - Time: staticTime, + StartTime: y2kPlus(0), + Time: y2kPlus(17), Value: 2, }, }, diff --git a/sdk/metric/pipeline.go b/sdk/metric/pipeline.go index 59453c1350b..5e60803e863 100644 --- a/sdk/metric/pipeline.go +++ b/sdk/metric/pipeline.go @@ -447,7 +447,10 @@ func (i *inserter[N]) aggregateFunc(b aggregate.Builder[N], agg Aggregation, kin case AggregationDrop: // Return nil in and out to signify the drop aggregator. case AggregationLastValue: - meas, comp = b.LastValue() + if kind == InstrumentKindObservableGauge { + meas, comp = b.PrecomputedLastValue() + } + // TODO (#5304): Support synchronous gauges. 
case AggregationSum: switch kind { case InstrumentKindObservableCounter: diff --git a/sdk/trace/span.go b/sdk/trace/span.go index c44f6b926aa..7acfd3fe9f4 100644 --- a/sdk/trace/span.go +++ b/sdk/trace/span.go @@ -630,7 +630,11 @@ func (s *recordingSpan) Resource() *resource.Resource { } func (s *recordingSpan) AddLink(link trace.Link) { - if !s.IsRecording() || !link.SpanContext.IsValid() { + if !s.IsRecording() { + return + } + if !link.SpanContext.IsValid() && len(link.Attributes) == 0 && + link.SpanContext.TraceState().Len() == 0 { return } diff --git a/sdk/trace/trace_test.go b/sdk/trace/trace_test.go index 615f4d58a1b..e7ef786a1b3 100644 --- a/sdk/trace/trace_test.go +++ b/sdk/trace/trace_test.go @@ -47,6 +47,7 @@ var ( tid trace.TraceID sid trace.SpanID sc trace.SpanContext + ts trace.TraceState handler = &storingHandler{} ) @@ -59,6 +60,7 @@ func init() { SpanID: sid, TraceFlags: 0x1, }) + ts, _ = trace.ParseTraceState("k=v") otel.SetErrorHandler(handler) } @@ -330,10 +332,6 @@ func TestStartSpanWithParent(t *testing.T) { t.Error(err) } - ts, err := trace.ParseTraceState("k=v") - if err != nil { - t.Error(err) - } sc2 := sc.WithTraceState(ts) _, s3 := tr.Start(trace.ContextWithRemoteSpanContext(ctx, sc2), "span3-sampled-parent2") if err := checkChild(t, sc2, s3); err != nil { @@ -1717,61 +1715,6 @@ func TestAddEventsWithMoreAttributesThanLimit(t *testing.T) { } } -func TestAddLinksWithMoreAttributesThanLimit(t *testing.T) { - te := NewTestExporter() - sl := NewSpanLimits() - sl.AttributePerLinkCountLimit = 1 - tp := NewTracerProvider( - WithSpanLimits(sl), - WithSyncer(te), - WithResource(resource.Empty()), - ) - - k1v1 := attribute.String("key1", "value1") - k2v2 := attribute.String("key2", "value2") - k3v3 := attribute.String("key3", "value3") - k4v4 := attribute.String("key4", "value4") - - sc1 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) - sc2 := trace.NewSpanContext(trace.SpanContextConfig{TraceID: trace.TraceID([16]byte{1, 1}), SpanID: trace.SpanID{3}}) - - span := startSpan(tp, "Links", trace.WithLinks([]trace.Link{ - {SpanContext: sc1, Attributes: []attribute.KeyValue{k1v1, k2v2}}, - {SpanContext: sc2, Attributes: []attribute.KeyValue{k2v2, k3v3, k4v4}}, - }...)) - - got, err := endSpan(te, span) - if err != nil { - t.Fatal(err) - } - - want := &snapshot{ - spanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: tid, - TraceFlags: 0x1, - }), - parent: sc.WithRemote(true), - name: "span0", - links: []Link{ - { - SpanContext: sc1, - Attributes: []attribute.KeyValue{k1v1}, - DroppedAttributeCount: 1, - }, - { - SpanContext: sc2, - Attributes: []attribute.KeyValue{k2v2}, - DroppedAttributeCount: 2, - }, - }, - spanKind: trace.SpanKindInternal, - instrumentationScope: instrumentation.Scope{Name: "Links"}, - } - if diff := cmpDiff(got, want); diff != "" { - t.Errorf("Link: -got +want %s", diff) - } -} - type stateSampler struct { prefix string f func(trace.TraceState) trace.TraceState @@ -1977,7 +1920,155 @@ func TestEmptyRecordingSpanDroppedAttributes(t *testing.T) { assert.Equal(t, 0, (&recordingSpan{}).DroppedAttributes()) } -func TestAddLinkWithInvalidSpanContext(t *testing.T) { +func TestSpanAddLink(t *testing.T) { + tests := []struct { + name string + attrLinkCountLimit int + link trace.Link + want *snapshot + }{ + { + name: "AddLinkWithInvalidSpanContext", + attrLinkCountLimit: 128, + link: trace.Link{ + SpanContext: trace.NewSpanContext(trace.SpanContextConfig{TraceID: 
trace.TraceID([16]byte{}), SpanID: [8]byte{}}), + }, + want: &snapshot{ + name: "span0", + spanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + TraceFlags: 0x1, + }), + parent: sc.WithRemote(true), + links: nil, + spanKind: trace.SpanKindInternal, + instrumentationScope: instrumentation.Scope{Name: "AddLinkWithInvalidSpanContext"}, + }, + }, + { + name: "AddLink", + attrLinkCountLimit: 128, + link: trace.Link{ + SpanContext: sc, + Attributes: []attribute.KeyValue{{Key: "k1", Value: attribute.StringValue("v1")}}, + }, + want: &snapshot{ + name: "span0", + spanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + TraceFlags: 0x1, + }), + parent: sc.WithRemote(true), + links: []Link{ + { + SpanContext: sc, + Attributes: []attribute.KeyValue{{Key: "k1", Value: attribute.StringValue("v1")}}, + }, + }, + spanKind: trace.SpanKindInternal, + instrumentationScope: instrumentation.Scope{Name: "AddLink"}, + }, + }, + { + name: "AddLinkWithMoreAttributesThanLimit", + attrLinkCountLimit: 1, + link: trace.Link{ + SpanContext: sc, + Attributes: []attribute.KeyValue{ + {Key: "k1", Value: attribute.StringValue("v1")}, + {Key: "k2", Value: attribute.StringValue("v2")}, + {Key: "k3", Value: attribute.StringValue("v3")}, + {Key: "k4", Value: attribute.StringValue("v4")}, + }, + }, + want: &snapshot{ + name: "span0", + spanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + TraceFlags: 0x1, + }), + parent: sc.WithRemote(true), + links: []Link{ + { + SpanContext: sc, + Attributes: []attribute.KeyValue{{Key: "k1", Value: attribute.StringValue("v1")}}, + DroppedAttributeCount: 3, + }, + }, + spanKind: trace.SpanKindInternal, + instrumentationScope: instrumentation.Scope{Name: "AddLinkWithMoreAttributesThanLimit"}, + }, + }, + { + name: "AddLinkWithAttributesEmptySpanContext", + attrLinkCountLimit: 128, + link: trace.Link{ + Attributes: []attribute.KeyValue{{Key: "k1", Value: attribute.StringValue("v1")}}, + }, + want: &snapshot{ + name: "span0", + spanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + TraceFlags: 0x1, + }), + parent: sc.WithRemote(true), + links: []Link{ + { + Attributes: []attribute.KeyValue{{Key: "k1", Value: attribute.StringValue("v1")}}, + }, + }, + spanKind: trace.SpanKindInternal, + instrumentationScope: instrumentation.Scope{Name: "AddLinkWithAttributesEmptySpanContext"}, + }, + }, + { + name: "AddLinkWithTraceStateEmptySpanContext", + attrLinkCountLimit: 128, + link: trace.Link{ + SpanContext: trace.SpanContext{}.WithTraceState(ts), + }, + want: &snapshot{ + name: "span0", + spanContext: trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + TraceFlags: 0x1, + }), + parent: sc.WithRemote(true), + links: []Link{ + { + SpanContext: trace.SpanContext{}.WithTraceState(ts), + }, + }, + spanKind: trace.SpanKindInternal, + instrumentationScope: instrumentation.Scope{Name: "AddLinkWithTraceStateEmptySpanContext"}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + te := NewTestExporter() + sl := NewSpanLimits() + sl.AttributePerLinkCountLimit = tc.attrLinkCountLimit + + tp := NewTracerProvider(WithSpanLimits(sl), WithSyncer(te), WithResource(resource.Empty())) + + span := startSpan(tp, tc.name) + span.AddLink(tc.link) + + got, err := endSpan(te, span) + if err != nil { + t.Fatal(err) + } + + if diff := cmpDiff(got, tc.want); diff != "" { + t.Errorf("-got +want %s", diff) + } + }) + } +} + +func TestAddLinkToNonRecordingSpan(t *testing.T) { te := NewTestExporter() sl := 
NewSpanLimits() tp := NewTracerProvider( @@ -1985,17 +2076,21 @@ func TestAddLinkWithInvalidSpanContext(t *testing.T) { WithSyncer(te), WithResource(resource.Empty()), ) - span := startSpan(tp, "AddSpanWithInvalidSpanContext") - inValidContext := trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: trace.TraceID([16]byte{}), - SpanID: [8]byte{}, - }) + attrs := []attribute.KeyValue{{Key: "k", Value: attribute.StringValue("v")}} + + span := startSpan(tp, "AddLinkToNonRecordingSpan") + _, err := endSpan(te, span) + require.NoError(t, err) + + // Add link to ended, non-recording, span. The link should be dropped. span.AddLink(trace.Link{ - SpanContext: inValidContext, + SpanContext: sc, Attributes: attrs, }) + require.Equal(t, 1, te.Len()) + got := te.Spans()[0] want := &snapshot{ name: "span0", spanContext: trace.NewSpanContext(trace.SpanContextConfig{ @@ -2005,52 +2100,10 @@ func TestAddLinkWithInvalidSpanContext(t *testing.T) { parent: sc.WithRemote(true), links: nil, spanKind: trace.SpanKindInternal, - instrumentationScope: instrumentation.Scope{Name: "AddSpanWithInvalidSpanContext"}, - } - got, err := endSpan(te, span) - if err != nil { - t.Fatal(err) + instrumentationScope: instrumentation.Scope{Name: "AddLinkToNonRecordingSpan"}, } - if diff := cmpDiff(got, want); diff != "" { - t.Errorf("AddLinkWithInvalidSpanContext: -got +want %s", diff) - } -} - -func TestAddLink(t *testing.T) { - te := NewTestExporter() - sl := NewSpanLimits() - tp := NewTracerProvider( - WithSpanLimits(sl), - WithSyncer(te), - WithResource(resource.Empty()), - ) - attrs := []attribute.KeyValue{{Key: "k", Value: attribute.StringValue("v")}} - span := startSpan(tp, "AddSpan") - - link := trace.Link{SpanContext: sc, Attributes: attrs} - span.AddLink(link) - want := &snapshot{ - name: "span0", - spanContext: trace.NewSpanContext(trace.SpanContextConfig{ - TraceID: tid, - TraceFlags: 0x1, - }), - parent: sc.WithRemote(true), - links: []Link{ - { - SpanContext: sc, - Attributes: attrs, - }, - }, - spanKind: trace.SpanKindInternal, - instrumentationScope: instrumentation.Scope{Name: "AddSpan"}, - } - got, err := endSpan(te, span) - if err != nil { - t.Fatal(err) - } if diff := cmpDiff(got, want); diff != "" { - t.Errorf("AddLink: -got +want %s", diff) + t.Errorf("AddLinkToNonRecordingSpan: -got +want %s", diff) } }
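
The stdoutlog change above fixes the empty `Body`/`Attributes` output by wrapping `log.Value` in a local type whose `MarshalJSON` switches on the value's `Kind`. Below is a minimal standalone sketch of that kind switch, converting a `log.Value` into plain Go types that `encoding/json` already handles; `jsonable` is an illustrative helper (the exporter's own wrapper types are unexported), and the example assumes the `log` package's `MapValue`, `String`, and `Int64` constructors.

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/log"
)

// jsonable recursively converts a log.Value into values encoding/json can
// serialize directly: strings, numbers, bools, byte slices, maps, and slices.
func jsonable(v log.Value) any {
	switch v.Kind() {
	case log.KindString:
		return v.AsString()
	case log.KindInt64:
		return v.AsInt64()
	case log.KindFloat64:
		return v.AsFloat64()
	case log.KindBool:
		return v.AsBool()
	case log.KindBytes:
		return v.AsBytes()
	case log.KindMap:
		kvs := v.AsMap()
		m := make(map[string]any, len(kvs))
		for _, kv := range kvs {
			m[kv.Key] = jsonable(kv.Value)
		}
		return m
	case log.KindSlice:
		s := v.AsSlice()
		out := make([]any, 0, len(s))
		for _, e := range s {
			out = append(out, jsonable(e))
		}
		return out
	default: // log.KindEmpty
		return nil
	}
}

func main() {
	body := log.MapValue(
		log.String("user", "alice"),
		log.Int64("attempts", 3),
	)
	b, err := json.Marshal(jsonable(body))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"attempts":3,"user":"alice"}
}
```

The exporter's `MarshalJSON` additionally records the `Kind` name next to each value, but nested maps and slices are walked in the same way.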
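
The new doc comments on `WithEndpoint` and `WithEndpointURL` in the otlpconfig template spell out the split: host and port only versus a full URL including scheme and path. A hedged usage sketch against the OTLP HTTP trace exporter follows; the collector address is a placeholder, and the other exporters generated from this template expose the same pair of options.

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()

	// Host and port only; scheme and URL path are left to the exporter's
	// defaults and other options.
	byHostPort, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("collector.example.internal:4318"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = byHostPort.Shutdown(ctx) }()

	// Full URL; scheme, host, port, and path are all taken from the value.
	byURL, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpointURL("https://collector.example.internal:4318/v1/traces"))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = byURL.Shutdown(ctx) }()
}
```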
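
The aggregate tests swap the old fixed `staticTime` for a deterministic clock that advances one second on every call to `now`, which is what makes the exact `y2kPlus(n)` offsets in the expected data points meaningful. A standalone sketch of the pattern, with a package-level `now` variable standing in for the SDK's internal one:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// now is the time source the code under test would call; in the SDK this is
// an internal package variable that the tests temporarily override.
var now = time.Now

// y2k is Sat Jan 01 2000 00:00:00 GMT+0000.
var y2k = time.Unix(946684800, 0)

type clock struct{ ticks atomic.Int64 }

// Now returns y2k plus one second per prior call, starting at y2k itself.
func (c *clock) Now() time.Time {
	return y2k.Add(time.Duration(c.ticks.Add(1)-1) * time.Second)
}

// Register installs the clock as the now func and returns an unregister func
// that restores the original.
func (c *clock) Register() (unregister func()) {
	orig := now
	now = c.Now
	return func() { now = orig }
}

func main() {
	c := new(clock)
	defer c.Register()()

	fmt.Println(now()) // 2000-01-01 00:00:00 +0000 UTC
	fmt.Println(now()) // 2000-01-01 00:00:01 +0000 UTC
}
```

Because every aggregate operation consumes exactly one tick, the expected `StartTime`/`Time` values in the tests can be written as fixed offsets, and `Reset` puts the clock back to zero between sub-tests.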
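
With `LastValue` now honoring `Builder.Temporality` and stamping a `StartTime` on gauge data points, and observable gauges routed to `PrecomputedLastValue` in the pipeline, the effect is visible from the SDK's public surface. A hypothetical sketch using a `ManualReader` and an observable gauge (the instrument name and callback value are made up):

```go
package main

import (
	"context"
	"fmt"
	"log"

	api "go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

func main() {
	ctx := context.Background()
	reader := sdkmetric.NewManualReader()
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
	defer func() { _ = provider.Shutdown(ctx) }()

	meter := provider.Meter("example")

	// Observable gauges aggregate with the precomputed last-value aggregate.
	_, err := meter.Int64ObservableGauge("queue.depth",
		api.WithInt64Callback(func(_ context.Context, o api.Int64Observer) error {
			o.Observe(42)
			return nil
		}))
	if err != nil {
		log.Fatal(err)
	}

	var rm metricdata.ResourceMetrics
	if err := reader.Collect(ctx, &rm); err != nil {
		log.Fatal(err)
	}

	gauge := rm.ScopeMetrics[0].Metrics[0].Data.(metricdata.Gauge[int64])
	dp := gauge.DataPoints[0]
	// StartTime is now populated instead of being left zero.
	fmt.Println(dp.StartTime, dp.Time, dp.Value)
}
```

With the default cumulative temporality the start time stays fixed at aggregate creation across collections; with delta temporality the `delta` method above resets it after each collection.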
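
The `recordingSpan.AddLink` change above means a link is no longer dropped solely because its `SpanContext` is invalid: it is recorded when it carries attributes or a non-empty `TraceState`, while non-recording spans still ignore it. A hedged sketch using the in-memory test exporter:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	exporter := tracetest.NewInMemoryExporter()
	tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exporter))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "work")
	// No SpanContext, but attributes are present, so the link is kept.
	span.AddLink(trace.Link{
		Attributes: []attribute.KeyValue{attribute.String("peer", "job-42")},
	})
	span.End()

	fmt.Println(len(exporter.GetSpans()[0].Links)) // 1
}
```

A link with a zero `SpanContext`, no attributes, and an empty `TraceState` is still discarded, matching the `AddLinkWithInvalidSpanContext` case in the table test above.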