From 9ef811489edc4dc1bef05e8e1ce40e69b4958e3e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:55:43 +0000 Subject: [PATCH] fix(deps): update module github.com/fsouza/fake-gcs-server to v1.50.0 --- go.mod | 60 +- go.sum | 123 +- vendor/cloud.google.com/go/auth/CHANGES.md | 34 + vendor/cloud.google.com/go/auth/auth.go | 11 +- .../go/auth/credentials/detect.go | 4 +- .../go/auth/credentials/filetypes.go | 17 +- .../go/auth/credentials/selfsignedjwt.go | 4 + .../go/auth/grpctransport/grpctransport.go | 11 +- .../go/auth/httptransport/httptransport.go | 2 + .../go/auth/httptransport/transport.go | 13 +- .../transport/cert/secureconnect_cert.go | 10 +- .../go/auth/internal/transport/s2a.go | 3 +- .../go/auth/internal/transport/transport.go | 6 +- .../cloud.google.com/go/bigtable/CHANGES.md | 27 + vendor/cloud.google.com/go/bigtable/admin.go | 71 +- .../cloud.google.com/go/bigtable/bigtable.go | 8 +- .../go/bigtable/bttest/inmem.go | 9 +- .../go/bigtable/conformance_test.sh | 8 +- .../go/bigtable/internal/version.go | 2 +- vendor/cloud.google.com/go/bigtable/type.go | 55 +- .../go/compute/metadata/CHANGES.md | 7 + .../go/compute/metadata/retry_linux.go | 9 +- vendor/cloud.google.com/go/iam/CHANGES.md | 7 + vendor/cloud.google.com/go/pubsub/CHANGES.md | 15 + .../go/pubsub/apiv1/auxiliary_go123.go | 56 + .../go/pubsub/internal/version.go | 2 +- vendor/cloud.google.com/go/pubsub/iterator.go | 53 +- .../cloud.google.com/go/pubsub/pullstream.go | 29 +- vendor/cloud.google.com/go/pubsub/topic.go | 10 +- vendor/cloud.google.com/go/pubsub/trace.go | 24 +- .../cncf/xds/go/xds/type/matcher/v3/cel.pb.go | 38 +- .../go/xds/type/matcher/v3/http_inputs.pb.go | 23 +- .../xds/go/xds/type/matcher/v3/matcher.pb.go | 266 +- .../github.com/fsouza/fake-gcs-server/LICENSE | 2 +- .../fake-gcs-server/fakestorage/bucket.go | 160 +- .../fake-gcs-server/fakestorage/config.go | 30 + .../fakestorage/json_response.go | 84 + .../fakestorage/mux_tranport.go | 6 +- .../fake-gcs-server/fakestorage/object.go | 1194 +- .../fake-gcs-server/fakestorage/response.go | 205 +- .../fake-gcs-server/fakestorage/server.go | 503 +- .../fake-gcs-server/fakestorage/upload.go | 636 +- .../fakestorage/xml_response.go | 123 + .../internal/backend/bucket.go | 22 + .../fake-gcs-server/internal/backend/fs.go | 450 +- .../internal/backend/lazy_file.go | 53 + .../internal/backend/memory.go | 404 +- .../internal/backend/metadata.go | 13 + .../internal/backend/metadata_file.go | 34 + .../internal/backend/metadata_xattr.go | 33 + .../internal/backend/object.go | 104 +- .../internal/backend/storage.go | 42 +- .../internal/backend/time_bsd.go | 19 + .../internal/backend/time_linux.go | 18 + .../internal/backend/time_windows.go | 18 + .../internal/backend/writefile_unix.go | 17 + .../internal/backend/writefile_windows.go | 13 + .../internal/checksum/checksum.go | 70 + .../internal/notification/event.go | 222 + vendor/github.com/google/btree/btree.go | 2 +- .../testdata/client_intermediate_cert.der | Bin 998 -> 0 bytes .../testdata/client_leaf_cert.der | Bin 1147 -> 0 bytes .../testdata/client_root_cert.der | Bin 1013 -> 0 bytes .../testdata/server_intermediate_cert.der | Bin 998 -> 0 bytes .../testdata/server_leaf_cert.der | Bin 1147 -> 0 bytes .../testdata/server_root_cert.der | Bin 1013 -> 0 bytes .../v2/remotesigner/testdata/client_cert.der | Bin 1013 -> 0 bytes .../v2/remotesigner/testdata/client_cert.pem | 24 - .../v2/remotesigner/testdata/client_key.pem | 27 - 
.../v2/remotesigner/testdata/server_cert.der | Bin 1013 -> 0 bytes .../v2/remotesigner/testdata/server_cert.pem | 24 - .../v2/remotesigner/testdata/server_key.pem | 27 - .../internal/v2/testdata/client_cert.pem | 24 - .../internal/v2/testdata/client_key.pem | 27 - .../internal/v2/testdata/server_cert.pem | 24 - .../internal/v2/testdata/server_key.pem | 27 - .../tlsconfigstore/testdata/client_cert.pem | 24 - .../v2/tlsconfigstore/testdata/client_key.pem | 27 - .../tlsconfigstore/testdata/server_cert.pem | 24 - .../v2/tlsconfigstore/testdata/server_key.pem | 27 - .../google/s2a-go/testdata/client_cert.pem | 24 - .../google/s2a-go/testdata/client_key.pem | 27 - .../s2a-go/testdata/mds_client_cert.pem | 19 - .../google/s2a-go/testdata/mds_client_key.pem | 28 - .../google/s2a-go/testdata/mds_root_cert.pem | 21 - .../s2a-go/testdata/mds_server_cert.pem | 21 - .../google/s2a-go/testdata/mds_server_key.pem | 28 - .../s2a-go/testdata/self_signed_cert.pem | 19 - .../s2a-go/testdata/self_signed_key.pem | 28 - .../google/s2a-go/testdata/server_cert.pem | 24 - .../google/s2a-go/testdata/server_key.pem | 27 - .../github.com/gorilla/handlers/.editorconfig | 20 + vendor/github.com/gorilla/handlers/.gitignore | 2 + vendor/github.com/gorilla/handlers/LICENSE | 27 + vendor/github.com/gorilla/handlers/Makefile | 34 + vendor/github.com/gorilla/handlers/README.md | 56 + .../github.com/gorilla/handlers/canonical.go | 73 + .../github.com/gorilla/handlers/compress.go | 143 + vendor/github.com/gorilla/handlers/cors.go | 352 + vendor/github.com/gorilla/handlers/doc.go | 9 + .../github.com/gorilla/handlers/handlers.go | 150 + vendor/github.com/gorilla/handlers/logging.go | 246 + .../gorilla/handlers/proxy_headers.go | 120 + .../github.com/gorilla/handlers/recovery.go | 98 + vendor/github.com/pkg/xattr/.gitignore | 26 + vendor/github.com/pkg/xattr/LICENSE | 25 + vendor/github.com/pkg/xattr/README.md | 45 + vendor/github.com/pkg/xattr/xattr.go | 258 + vendor/github.com/pkg/xattr/xattr_bsd.go | 201 + vendor/github.com/pkg/xattr/xattr_darwin.go | 90 + vendor/github.com/pkg/xattr/xattr_linux.go | 142 + vendor/github.com/pkg/xattr/xattr_solaris.go | 175 + .../github.com/pkg/xattr/xattr_unsupported.go | 70 + .../google.golang.org/grpc/otelgrpc/config.go | 28 + .../grpc/otelgrpc/stats_handler.go | 8 +- .../grpc/otelgrpc/version.go | 2 +- .../net/http/otelhttp/common.go | 7 - .../net/http/otelhttp/config.go | 15 +- .../net/http/otelhttp/handler.go | 93 +- .../otelhttp/internal/request/body_wrapper.go | 75 + .../internal/request/resp_writer_wrapper.go | 112 + .../net/http/otelhttp/internal/semconv/env.go | 97 +- .../semconv/{v1.24.0.go => httpconv.go} | 153 +- .../http/otelhttp/internal/semconv/util.go | 11 +- .../http/otelhttp/internal/semconv/v1.20.0.go | 118 + .../otelhttp/internal/semconvutil/netconv.go | 2 +- .../net/http/otelhttp/transport.go | 64 +- .../net/http/otelhttp/version.go | 2 +- .../instrumentation/net/http/otelhttp/wrap.go | 99 - vendor/go.opentelemetry.io/otel/.golangci.yml | 2 + vendor/go.opentelemetry.io/otel/CHANGELOG.md | 67 +- vendor/go.opentelemetry.io/otel/CODEOWNERS | 2 +- .../go.opentelemetry.io/otel/CONTRIBUTING.md | 2 +- vendor/go.opentelemetry.io/otel/Makefile | 13 +- vendor/go.opentelemetry.io/otel/README.md | 37 +- vendor/go.opentelemetry.io/otel/RELEASING.md | 1 + .../otel/baggage/baggage.go | 150 +- .../go.opentelemetry.io/otel/codes/codes.go | 2 +- vendor/go.opentelemetry.io/otel/doc.go | 2 + .../otel/internal/rawhelpers.go | 9 +- .../go.opentelemetry.io/otel/metric/meter.go | 13 + 
.../otel/sdk/instrumentation/library.go | 3 +- .../otel/sdk/metric/doc.go | 8 + .../aggregate/exponential_histogram.go | 78 +- .../sdk/metric/internal/exemplar/value.go | 3 +- .../otel/sdk/metric/meter.go | 20 + .../otel/sdk/metric/reader.go | 2 +- .../otel/sdk/metric/version.go | 2 +- .../go.opentelemetry.io/otel/sdk/version.go | 2 +- .../otel/semconv/v1.24.0/README.md | 3 - .../otel/semconv/v1.24.0/attribute_group.go | 4387 ------ .../otel/semconv/v1.24.0/doc.go | 9 - .../otel/semconv/v1.24.0/event.go | 200 - .../otel/semconv/v1.24.0/exception.go | 9 - .../otel/semconv/v1.24.0/metric.go | 1071 -- .../otel/semconv/v1.24.0/resource.go | 2545 ---- .../otel/semconv/v1.24.0/schema.go | 9 - .../otel/semconv/v1.24.0/trace.go | 1323 -- .../otel/trace/provider.go | 59 + vendor/go.opentelemetry.io/otel/trace/span.go | 177 + .../go.opentelemetry.io/otel/trace/trace.go | 249 - .../go.opentelemetry.io/otel/trace/tracer.go | 37 + .../otel/trace/tracestate.go | 10 + .../otel/verify_released_changelog.sh | 42 + vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 8 +- .../golang.org/x/crypto/argon2/blamka_amd64.s | 2972 +++- .../x/crypto/blake2b/blake2bAVX2_amd64.s | 5167 ++++++- .../x/crypto/blake2b/blake2b_amd64.s | 1681 ++- .../chacha20poly1305/chacha20poly1305_amd64.s | 11503 +++++++++++++--- .../x/crypto/internal/poly1305/sum_amd64.s | 133 +- vendor/golang.org/x/oauth2/token.go | 7 + vendor/golang.org/x/sys/cpu/cpu.go | 19 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 2 +- .../golang.org/x/sys/cpu/cpu_linux_riscv64.go | 137 + vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 11 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + .../golang.org/x/sys/unix/syscall_darwin.go | 37 + vendor/golang.org/x/sys/unix/syscall_hurd.go | 1 + .../x/sys/unix/zerrors_darwin_amd64.go | 7 + .../x/sys/unix/zerrors_darwin_arm64.go | 7 + .../x/sys/unix/zerrors_zos_s390x.go | 2 + .../x/sys/unix/zsyscall_darwin_amd64.go | 20 + .../x/sys/unix/zsyscall_darwin_amd64.s | 5 + .../x/sys/unix/zsyscall_darwin_arm64.go | 20 + .../x/sys/unix/zsyscall_darwin_arm64.s | 5 + .../x/sys/unix/ztypes_darwin_amd64.go | 13 + .../x/sys/unix/ztypes_darwin_arm64.go | 13 + .../x/sys/unix/ztypes_freebsd_386.go | 1 + .../x/sys/unix/ztypes_freebsd_amd64.go | 1 + .../x/sys/unix/ztypes_freebsd_arm.go | 1 + .../x/sys/unix/ztypes_freebsd_arm64.go | 1 + .../x/sys/unix/ztypes_freebsd_riscv64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 2 +- .../x/sys/unix/ztypes_linux_riscv64.go | 33 + .../x/sys/windows/syscall_windows.go | 4 + .../golang.org/x/sys/windows/types_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 38 + vendor/golang.org/x/term/term_windows.go | 1 + .../api/compute/v1/compute-api.json | 50 +- .../api/compute/v1/compute-gen.go | 47 +- .../api/compute/v1/compute2-gen.go | 4 +- .../api/compute/v1/compute3-gen.go | 4 +- .../google.golang.org/api/internal/version.go | 2 +- .../api/storage/v1/storage-api.json | 14 +- .../api/storage/v1/storage-gen.go | 4 + .../api/transport/grpc/dial.go | 1 + .../api/transport/http/dial.go | 1 + .../googleapis/api/annotations/client.pb.go | 426 +- vendor/google.golang.org/grpc/MAINTAINERS.md | 33 +- vendor/google.golang.org/grpc/SECURITY.md | 2 +- .../google.golang.org/grpc/backoff/backoff.go | 2 +- .../grpc/balancer/balancer.go | 21 + .../grpc/balancer/base/balancer.go | 4 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 28 +- .../grpc_lb_v1/load_balancer_grpc.pb.go | 85 +- .../grpc/balancer/grpclb/grpclb.go | 2 +- .../balancer/leastrequest/leastrequest.go 
| 4 +- .../grpc/balancer/pickfirst/pickfirst.go | 4 +- .../grpc/balancer/rls/balancer.go | 87 +- .../grpc/balancer/rls/cache.go | 58 +- .../rls/internal/adaptive/lookback.go | 7 - .../balancer/rls/internal/keys/builder.go | 2 +- .../grpc/balancer/rls/picker.go | 104 +- .../balancer/weightedroundrobin/balancer.go | 135 +- .../balancer/weightedroundrobin/scheduler.go | 18 +- .../weightedaggregator/aggregator.go | 2 +- .../balancer/weightedtarget/weightedtarget.go | 13 +- .../grpc/balancer_wrapper.go | 50 +- .../grpc_binarylog_v1/binarylog.pb.go | 24 +- vendor/google.golang.org/grpc/clientconn.go | 114 +- vendor/google.golang.org/grpc/codec.go | 69 +- .../alts/internal/conn/aeadrekey.go | 2 +- .../alts/internal/conn/aes128gcmrekey.go | 2 +- .../credentials/alts/internal/conn/record.go | 7 - .../alts/internal/handshaker/handshaker.go | 4 +- .../internal/handshaker/service/service.go | 4 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 8 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 299 +- .../proto/grpc_gcp/handshaker_grpc.pb.go | 23 +- .../grpc_gcp/transport_security_common.pb.go | 10 +- .../grpc/credentials/insecure/insecure.go | 2 +- .../grpc/credentials/oauth/oauth.go | 6 +- .../credentials/tls/certprovider/store.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 29 +- vendor/google.golang.org/grpc/doc.go | 2 +- .../grpc/encoding/encoding.go | 5 +- .../grpc/encoding/encoding_v2.go | 81 + .../grpc/encoding/proto/proto.go | 44 +- .../grpc/experimental/experimental.go | 55 +- .../grpc/experimental/stats/metricregistry.go | 269 + .../grpc/experimental/stats/metrics.go | 114 + .../grpc/grpclog/component.go | 10 +- .../google.golang.org/grpc/grpclog/grpclog.go | 104 +- .../grpc/grpclog/internal/grpclog.go | 26 + .../grpc/grpclog/internal/logger.go | 87 + .../internal/loggerv2.go} | 178 +- .../google.golang.org/grpc/grpclog/logger.go | 59 +- .../grpc/grpclog/loggerv2.go | 181 +- .../grpc/health/grpc_health_v1/health.pb.go | 10 +- .../health/grpc_health_v1/health_grpc.pb.go | 71 +- .../google.golang.org/grpc/health/server.go | 2 +- .../internal/balancergroup/balancergroup.go | 48 +- .../grpc/internal/binarylog/method_logger.go | 2 +- .../grpc/internal/channelz/channelmap.go | 9 +- .../grpc/internal/channelz/funcs.go | 2 +- .../internal/channelz/syscall_nonlinux.go | 4 +- .../grpc/internal/envconfig/envconfig.go | 6 +- .../grpc/internal/experimental.go | 8 +- .../grpc/internal/googlecloud/googlecloud.go | 6 +- .../{prefixLogger.go => prefix_logger.go} | 40 +- .../internal/grpcsync/callback_serializer.go | 24 +- .../grpc/internal/grpcsync/pubsub.go | 4 +- .../grpc/internal/internal.go | 24 +- .../internal/proto/grpc_lookup_v1/rls.pb.go | 10 +- .../proto/grpc_lookup_v1/rls_config.pb.go | 135 +- .../proto/grpc_lookup_v1/rls_grpc.pb.go | 23 +- .../resolver/passthrough/passthrough.go | 2 +- .../internal/stats/metrics_recorder_list.go | 95 + .../grpc/internal/status/status.go | 4 +- .../grpc/internal/syscall/syscall_nonlinux.go | 6 +- .../grpc/internal/tcp_keepalive_unix.go | 2 +- .../grpc/internal/tcp_keepalive_windows.go | 2 +- .../grpc/internal/transport/controlbuf.go | 256 +- .../grpc/internal/transport/handler_server.go | 47 +- .../grpc/internal/transport/http2_client.go | 79 +- .../grpc/internal/transport/http2_server.go | 49 +- .../grpc/internal/transport/http_util.go | 24 +- .../grpc/internal/transport/proxy.go | 10 +- .../grpc/internal/transport/transport.go | 242 +- .../grpc/internal/xds/bootstrap/bootstrap.go | 1005 +- .../grpc/internal/xds/bootstrap/template.go | 2 +- 
.../grpc/internal/xds/rbac/converter.go | 4 +- .../grpc/internal/xds/rbac/matchers.go | 2 +- .../grpc/internal/xds/rbac/rbac_engine.go | 18 +- .../grpc/keepalive/keepalive.go | 20 +- .../google.golang.org/grpc/mem/buffer_pool.go | 194 + .../grpc/mem/buffer_slice.go | 226 + vendor/google.golang.org/grpc/mem/buffers.go | 252 + .../grpc/metadata/metadata.go | 7 +- .../grpc/orca/call_metrics.go | 2 +- .../google.golang.org/grpc/orca/producer.go | 4 +- .../grpc/orca/server_metrics.go | 2 +- vendor/google.golang.org/grpc/preloader.go | 28 +- vendor/google.golang.org/grpc/regenerate.sh | 123 - .../grpc/resolver/manual/manual.go | 4 +- .../grpc/resolver_wrapper.go | 9 +- vendor/google.golang.org/grpc/rpc_util.go | 331 +- vendor/google.golang.org/grpc/server.go | 99 +- .../grpc/shared_buffer_pool.go | 154 - vendor/google.golang.org/grpc/stats/stats.go | 6 - vendor/google.golang.org/grpc/stream.go | 213 +- .../grpc/stream_interfaces.go | 86 + .../grpc/test/bufconn/bufconn.go | 2 +- vendor/google.golang.org/grpc/version.go | 2 +- .../grpc/xds/bootstrap/credentials.go | 2 +- .../google.golang.org/grpc/xds/csds/csds.go | 27 +- .../grpc/xds/googledirectpath/googlec2p.go | 118 +- .../balancer/cdsbalancer/cdsbalancer.go | 26 +- .../balancer/cdsbalancer/cluster_watcher.go | 21 +- .../balancer/clusterimpl/clusterimpl.go | 206 +- .../internal/balancer/clusterimpl/picker.go | 25 +- .../clusterresolver/clusterresolver.go | 9 +- .../clusterresolver/resource_resolver.go | 50 +- .../clusterresolver/resource_resolver_dns.go | 8 +- .../clusterresolver/resource_resolver_eds.go | 16 +- .../balancer/loadstore/load_store_wrapper.go | 2 +- .../balancer/outlierdetection/balancer.go | 14 - .../internal/balancer/priority/balancer.go | 1 + .../balancer/priority/balancer_priority.go | 12 +- .../xds/internal/balancer/ringhash/picker.go | 25 - .../xds/internal/balancer/ringhash/ring.go | 12 +- .../internal/balancer/ringhash/ringhash.go | 45 +- .../xds/internal/clusterspecifier/rls/rls.go | 4 +- .../xds/internal/httpfilter/fault/fault.go | 10 +- .../grpc/xds/internal/httpfilter/rbac/rbac.go | 14 +- .../xds/internal/httpfilter/router/router.go | 4 +- .../grpc/xds/internal/internal.go | 5 + .../internal/resolver/internal/internal.go | 2 +- .../xds/internal/resolver/serviceconfig.go | 6 +- .../xds/internal/resolver/watch_service.go | 45 +- .../xds/internal/resolver/xds_resolver.go | 24 +- .../grpc/xds/internal/server/conn_wrapper.go | 4 +- .../xds/internal/server/listener_wrapper.go | 9 +- .../grpc/xds/internal/server/rds_handler.go | 9 +- .../grpc/xds/internal/xdsclient/authority.go | 80 +- .../grpc/xds/internal/xdsclient/client.go | 6 - .../grpc/xds/internal/xdsclient/client_new.go | 110 +- .../internal/xdsclient/client_refcounted.go | 104 + .../grpc/xds/internal/xdsclient/clientimpl.go | 18 +- .../xdsclient/clientimpl_authority.go | 14 +- .../xds/internal/xdsclient/clientimpl_dump.go | 37 +- .../xdsclient/clientimpl_loadreport.go | 2 +- .../internal/xdsclient/clientimpl_watchers.go | 4 +- .../internal/xdsclient/internal/internal.go | 25 + .../grpc/xds/internal/xdsclient/load/store.go | 17 +- .../grpc/xds/internal/xdsclient/singleton.go | 115 - .../xdsclient/transport/internal/internal.go | 25 + .../xdsclient/transport/loadreport.go | 1 + .../internal/xdsclient/transport/transport.go | 152 +- .../xdsresource/cluster_resource_type.go | 18 +- .../xdsresource/endpoints_resource_type.go | 20 +- .../xdsclient/xdsresource/filter_chain.go | 8 +- .../xdsresource/listener_resource_type.go | 22 +- 
.../xdsclient/xdsresource/resource_type.go | 12 +- .../xdsresource/route_config_resource_type.go | 20 +- .../xdsclient/xdsresource/unmarshal_cds.go | 12 +- vendor/google.golang.org/grpc/xds/server.go | 17 +- vendor/modules.txt | 75 +- 369 files changed, 32274 insertions(+), 18189 deletions(-) create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go create mode 100644 vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem delete 
mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_key.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/server_cert.pem delete mode 100644 vendor/github.com/google/s2a-go/testdata/server_key.pem create mode 100644 vendor/github.com/gorilla/handlers/.editorconfig create mode 100644 vendor/github.com/gorilla/handlers/.gitignore create mode 100644 vendor/github.com/gorilla/handlers/LICENSE create mode 100644 vendor/github.com/gorilla/handlers/Makefile create mode 100644 vendor/github.com/gorilla/handlers/README.md create mode 100644 vendor/github.com/gorilla/handlers/canonical.go create mode 100644 vendor/github.com/gorilla/handlers/compress.go create mode 100644 vendor/github.com/gorilla/handlers/cors.go create mode 100644 vendor/github.com/gorilla/handlers/doc.go create mode 100644 vendor/github.com/gorilla/handlers/handlers.go create mode 100644 vendor/github.com/gorilla/handlers/logging.go create mode 100644 vendor/github.com/gorilla/handlers/proxy_headers.go create mode 100644 vendor/github.com/gorilla/handlers/recovery.go create mode 100644 vendor/github.com/pkg/xattr/.gitignore create mode 100644 vendor/github.com/pkg/xattr/LICENSE create mode 100644 vendor/github.com/pkg/xattr/README.md create mode 100644 vendor/github.com/pkg/xattr/xattr.go create mode 100644 vendor/github.com/pkg/xattr/xattr_bsd.go create mode 100644 vendor/github.com/pkg/xattr/xattr_darwin.go create mode 100644 vendor/github.com/pkg/xattr/xattr_linux.go create mode 100644 vendor/github.com/pkg/xattr/xattr_solaris.go create mode 100644 vendor/github.com/pkg/xattr/xattr_unsupported.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go create mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go rename vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/{v1.24.0.go => httpconv.go} (57%) delete mode 100644 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go delete mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go delete 
mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/provider.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/span.go create mode 100644 vendor/go.opentelemetry.io/otel/trace/tracer.go create mode 100644 vendor/go.opentelemetry.io/otel/verify_released_changelog.sh create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding_v2.go create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metricregistry.go create mode 100644 vendor/google.golang.org/grpc/experimental/stats/metrics.go create mode 100644 vendor/google.golang.org/grpc/grpclog/internal/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/internal/logger.go rename vendor/google.golang.org/grpc/{internal/grpclog/grpclog.go => grpclog/internal/loggerv2.go} (52%) rename vendor/google.golang.org/grpc/internal/grpclog/{prefixLogger.go => prefix_logger.go} (63%) create mode 100644 vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go create mode 100644 vendor/google.golang.org/grpc/mem/buffer_pool.go create mode 100644 vendor/google.golang.org/grpc/mem/buffer_slice.go create mode 100644 vendor/google.golang.org/grpc/mem/buffers.go delete mode 100644 vendor/google.golang.org/grpc/regenerate.sh delete mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go delete mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go create mode 100644 vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go diff --git a/go.mod b/go.mod index 3981ee97bf44a..105171d8314e4 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,12 @@ module github.com/grafana/loki/v3 -go 1.21.8 +go 1.22 + +toolchain go1.23.1 require ( - cloud.google.com/go/bigtable v1.29.0 - cloud.google.com/go/pubsub v1.42.0 + cloud.google.com/go/bigtable v1.31.0 + cloud.google.com/go/pubsub v1.43.0 cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.14.0 @@ -34,7 +36,7 @@ require ( github.com/fatih/color v1.16.0 github.com/felixge/fgprof v0.9.4 github.com/fluent/fluent-bit-go v0.0.0-20230731091245-a7a013e2473c - github.com/fsouza/fake-gcs-server v1.7.0 + github.com/fsouza/fake-gcs-server v1.50.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.6.0 github.com/go-redis/redis/v8 v8.11.5 @@ -96,13 +98,13 @@ require ( go.etcd.io/bbolt v1.3.10 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 - golang.org/x/crypto v0.26.0 - golang.org/x/net v0.28.0 + golang.org/x/crypto v0.27.0 + golang.org/x/net v0.29.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 + golang.org/x/sys v0.25.0 golang.org/x/time v0.6.0 - google.golang.org/api v0.193.0 - google.golang.org/grpc v1.65.0 + google.golang.org/api v0.198.0 + google.golang.org/grpc v1.67.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -148,8 +150,8 @@ require ( go.opentelemetry.io/collector/pdata v1.12.0 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 - golang.org/x/oauth2 v0.22.0 - golang.org/x/text v0.17.0 + golang.org/x/oauth2 v0.23.0 + golang.org/x/text v0.18.0 google.golang.org/protobuf v1.34.2 gotest.tools v2.2.0+incompatible 
k8s.io/apimachinery v0.29.3 @@ -158,7 +160,7 @@ require ( require ( cel.dev/expr v0.16.0 // indirect - cloud.google.com/go/auth v0.9.0 // indirect + cloud.google.com/go/auth v0.9.4 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/monitoring v1.21.0 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect @@ -170,12 +172,14 @@ require ( github.com/go-ini/ini v1.67.0 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/goccy/go-json v0.10.3 // indirect + github.com/gorilla/handlers v1.5.2 // indirect github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/sys/userns v0.1.0 // indirect github.com/ncw/swift v1.0.53 // indirect github.com/pires/go-proxyproto v0.7.0 // indirect + github.com/pkg/xattr v0.4.10 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/rivo/uniseg v0.4.7 // indirect @@ -183,14 +187,14 @@ require ( github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect ) require ( cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.2.0 // indirect + cloud.google.com/go/compute/metadata v0.5.1 // indirect + cloud.google.com/go/iam v1.2.1 // indirect cloud.google.com/go/longrunning v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect @@ -229,7 +233,7 @@ require ( github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect + github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -271,13 +275,13 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da // indirect github.com/google/s2a-go v0.1.8 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gophercloud/gophercloud v1.13.0 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect @@ -348,19 +352,19 @@ require ( go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/collector/semconv v0.105.0 // 
indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/mod v0.19.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/term v0.24.0 // indirect golang.org/x/tools v0.23.0 // indirect - google.golang.org/genproto v0.0.0-20240820151423-278611b39280 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect diff --git a/go.sum b/go.sum index fabf4aa4fdca5..e703832f0519a 100644 --- a/go.sum +++ b/go.sum @@ -41,8 +41,8 @@ cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPus cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= -cloud.google.com/go/auth v0.9.0 h1:cYhKl1JUhynmxjXfrk4qdPc6Amw7i+GC9VLflgT0p5M= -cloud.google.com/go/auth v0.9.0/go.mod h1:2HsApZBr9zGZhC9QAXsYVYaWk8kNUt37uny+XVKi7wM= +cloud.google.com/go/auth v0.9.4 h1:DxF7imbEbiFu9+zdKC6cKBko1e8XeJnipNqIbWZ+kDI= +cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -53,8 +53,8 @@ cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUM cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= -cloud.google.com/go/bigtable v1.29.0 h1:2CnFjKPwjpZMZdTi2RpppvxzD80zKzDYrLYEQw/NnAs= -cloud.google.com/go/bigtable v1.29.0/go.mod h1:5p909nNdWaNUcWs6KGZO8mI5HUovstlmrIi7+eA5PTQ= +cloud.google.com/go/bigtable v1.31.0 h1:/uVLxGVRbK4mxK/iO89VqXcL/zoTSmkltVfIDYVBluQ= +cloud.google.com/go/bigtable v1.31.0/go.mod h1:N/mwZO+4TSHOeyiE1JxO+sRPnW4bnR7WLn9AEaiJqew= cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= 
cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= @@ -64,8 +64,8 @@ cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6m cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.1 h1:NM6oZeZNlYjiwYje+sYFjEpP0Q0zCan1bmQW/KmIrGs= +cloud.google.com/go/compute/metadata v0.5.1/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= @@ -84,10 +84,10 @@ cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFP cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= -cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= -cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= -cloud.google.com/go/kms v1.18.5 h1:75LSlVs60hyHK3ubs2OHd4sE63OAMcM2BdSJc2bkuM4= -cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU= +cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM= cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= @@ -108,8 +108,8 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.42.0 h1:PVTbzorLryFL5ue8esTS2BfehUs0ahyNOY9qcd+HMOs= -cloud.google.com/go/pubsub v1.42.0/go.mod h1:KADJ6s4MbTwhXmse/50SebEhE4SmUwHi48z3/dHar1Y= +cloud.google.com/go/pubsub v1.43.0 h1:s3Qx+F96J7Kwey/uVHdK3QxFLIlOvvw4SfMYw2jFjb4= +cloud.google.com/go/pubsub v1.43.0/go.mod h1:LNLfqItblovg7mHWgU5g84Vhza4J8kTxx0YqIeTzcXY= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= @@ -459,8 +459,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod 
h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= @@ -642,8 +642,8 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8= -github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= +github.com/fsouza/fake-gcs-server v1.50.0 h1:MRW1OuLyHnFKKLGNILiH+x6CMKqn/R05auJ5ET5PCyk= +github.com/fsouza/fake-gcs-server v1.50.0/go.mod h1:itn0kDInXYbYXZ+2dLch83bR8lpp7YQ5czkZnH6IRH8= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= @@ -934,8 +934,8 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -1005,8 +1005,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod 
h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= @@ -1029,8 +1029,9 @@ github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdy github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= +github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= @@ -1589,6 +1590,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= +github.com/pkg/xattr v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1886,8 +1889,8 @@ github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7 github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= -go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= +go.einride.tech/aip v0.68.0 h1:4seM66oLzTpz50u4K1zlJyOXQ3tCzcJN7I22tKkjipw= +go.einride.tech/aip v0.68.0/go.mod h1:7y9FF8VtPWqpxuAxl0KQWqaULxW4zFIesD6zF5RIHHg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt 
v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1929,24 +1932,24 @@ go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimK go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI= go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 
h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= @@ -2012,8 +2015,8 @@ golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2141,8 +2144,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2167,8 +2170,8 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2291,6 +2294,7 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2312,8 +2316,8 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2323,8 +2327,8 @@ golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2342,8 +2346,8 @@ golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= 
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2453,7 +2457,6 @@ gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2499,8 +2502,8 @@ google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOI google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= -google.golang.org/api v0.193.0 h1:eOGDoJFsLU+HpCBaDJex2fWiYujAw9KbXgpOAMePoUs= -google.golang.org/api v0.193.0/go.mod h1:Po3YMV1XZx+mTku3cfJrlIYR03wiGrCOsdpC67hjZvw= +google.golang.org/api v0.198.0 h1:OOH5fZatk57iN0A7tjJQzt6aPfYQ1JiWkt1yGseazks= +google.golang.org/api v0.198.0/go.mod h1:/Lblzl3/Xqqk9hw/yS97TImKTUwnf1bv89v7+OagJzc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2611,12 +2614,12 @@ google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+S google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= -google.golang.org/genproto v0.0.0-20240820151423-278611b39280 h1:oKt8r1ZvaPqBe3oeGTdyx1iNjuBS+VJcc9QdU1CD3d8= -google.golang.org/genproto v0.0.0-20240820151423-278611b39280/go.mod h1:wxEc5TmU9JSLs1rSqG4z1YzeSNigp/9yIojIPuZVvKQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 h1:YDFM9oOjiFhaMAVgbDxfxW+66nRrsvzQzJ51wp3OxC0= -google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280/go.mod h1:fO8wJzT2zbQbAjbIoos1285VfEIYKDDY+Dt+WpTkh6g= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 h1:XQMA2e105XNlEZ8NRF0HqnUOZzP14sUSsgL09kpdNnU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 
h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2659,8 +2662,8 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/grpc v1.67.0 h1:IdH9y6PF5MPSdAntIcpjQ+tXO41pcQsfZV2RxtQgVcw= +google.golang.org/grpc v1.67.0/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 8042bdae809b6..e82cf5a901a96 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,39 @@ # Changelog +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + +## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) + + +### Bug Fixes + +* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804) + +## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30) + + +### Bug Fixes + +* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742) +* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs 
[#10795](https://github.com/googleapis/google-cloud-go/issues/10795) + + +### Documentation + +* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437)) + +## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22) + + +### Bug Fixes + +* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948)) + ## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16) diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index 41e03f293546e..bc37ea85fb51f 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -12,6 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package auth provides utilities for managing Google Cloud credentials, +// including functionality for creating, caching, and refreshing OAuth2 tokens. +// It offers customizable options for different OAuth2 flows, such as 2-legged +// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic +// token management. package auth import ( @@ -130,7 +135,9 @@ func (t *Token) isEmpty() bool { } // Credentials holds Google credentials, including -// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials). +// [Application Default Credentials]. +// +// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials type Credentials struct { json []byte projectID CredentialsPropertyProvider @@ -258,7 +265,7 @@ func (ctpo *CachedTokenProviderOptions) autoRefresh() bool { } func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration { - if ctpo == nil { + if ctpo == nil || ctpo.ExpireEarly == 0 { return defaultExpiryDelta } return ctpo.ExpireEarly diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go index cce6224186a73..010afc37c8fe4 100644 --- a/vendor/cloud.google.com/go/auth/credentials/detect.go +++ b/vendor/cloud.google.com/go/auth/credentials/detect.go @@ -98,8 +98,8 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) { if OnGCE() { return auth.NewCredentials(&auth.CredentialsOptions{ TokenProvider: computeTokenProvider(opts), - ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) { - return metadata.ProjectID() + ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) { + return metadata.ProjectIDWithContext(ctx) }), UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{}, }), nil diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index b426e16d29757..6591b181132f7 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { return nil, err } - var projectID, quotaProjectID, universeDomain string + var projectID, universeDomain string var tp 
auth.TokenProvider switch fileType { case credsfile.ServiceAccountKey: @@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ExternalAccountKey: f, err := credsfile.ParseExternalAccount(b) @@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) case credsfile.ExternalAccountAuthorizedUserKey: f, err := credsfile.ParseExternalAccountAuthorizedUser(b) @@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { if err != nil { return nil, err } - quotaProjectID = f.QuotaProjectID universeDomain = f.UniverseDomain case credsfile.ImpersonatedServiceAccountKey: f, err := credsfile.ParseImpersonatedServiceAccount(b) @@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) { TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{ ExpireEarly: opts.EarlyTokenRefresh, }), - JSON: b, - ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), - QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID), + JSON: b, + ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID), + // TODO(codyoss): only set quota project here if there was a user override UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain), }), nil } @@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. + opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4d5d70..6ae29de6c2789 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -17,6 +17,7 @@ package credentials import ( "context" "crypto/rsa" + "errors" "fmt" "strings" "time" @@ -35,6 +36,9 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. 
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } pk, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 0442a5938a800..21488e29f0b71 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package grpctransport provides functionality for managing gRPC client +// connections to Google Cloud services. package grpctransport import ( @@ -38,7 +40,7 @@ const ( // Check env to decide if using google-c2p resolver for DirectPath traffic. enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) var ( @@ -271,7 +273,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er if metadata == nil { metadata = make(map[string]string, 1) } - metadata[quotaProjectHeaderKey] = qp + // Don't overwrite user specified quota + if _, ok := metadata[quotaProjectHeaderKey]; !ok { + metadata[quotaProjectHeaderKey] = qp + } } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(&grpcCredentialsProvider{ @@ -291,7 +296,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er grpcOpts = addOCStatsHandler(grpcOpts, opts) grpcOpts = append(grpcOpts, opts.GRPCDialOpts...) - return grpc.DialContext(ctx, endpoint, grpcOpts...) + return grpc.NewClient(endpoint, grpcOpts...) } // grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials. diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go index 969c8d4d2008c..30fedf9562f96 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package httptransport provides functionality for managing HTTP client +// connections to Google Cloud services. 
package httptransport import ( diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 07eea474446b3..274bb01254c92 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -31,7 +31,7 @@ import ( ) const ( - quotaProjectHeaderKey = "X-Goog-User-Project" + quotaProjectHeaderKey = "X-goog-user-project" ) func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) { @@ -76,7 +76,10 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err if headers == nil { headers = make(map[string][]string, 1) } - headers.Set(quotaProjectHeaderKey, qp) + // Don't overwrite user specified quota + if v := headers.Get(quotaProjectHeaderKey); v == "" { + headers.Set(quotaProjectHeaderKey, qp) + } } creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil) trans = &authTransport{ @@ -94,7 +97,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - trans := http.DefaultTransport.(*http.Transport).Clone() + defaultTransport, ok := http.DefaultTransport.(*http.Transport) + if !ok { + defaultTransport = transport.BaseTransport() + } + trans := defaultTransport.Clone() trans.MaxIdleConnsPerHost = 100 if clientCertSource != nil { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go index 3227aba280c8e..738cb21618e79 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go @@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) { file, err := os.ReadFile(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Secure Connect is not supported. - return nil, errSourceUnavailable - } - return nil, err + // Config file missing means Secure Connect is not supported. + // There are non-os.ErrNotExist errors that may be returned. + // (e.g. 
if the home directory is /dev/null, *nix systems will + // return ENOTDIR instead of ENOENT) + return nil, errSourceUnavailable } var metadata secureConnectMetadata diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go index 4df73edce986e..37894bfcd013b 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go @@ -15,6 +15,7 @@ package transport import ( + "context" "encoding/json" "fmt" "log" @@ -84,7 +85,7 @@ func getMetadataMTLSAutoConfig() { } var httpGetMetadataMTLSConfig = func() (string, error) { - return metadata.Get(configEndpointSuffix) + return metadata.GetWithContext(context.Background(), configEndpointSuffix) } func queryConfig() (*mtlsConfig, error) { diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go index 718a6b1714586..cc586ec5b1a5d 100644 --- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go +++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go @@ -81,12 +81,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri // DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS. func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client { - trans := baseTransport() + trans := BaseTransport() trans.TLSClientConfig = tlsConfig return &http.Client{Transport: trans} } -func baseTransport() *http.Transport { +// BaseTransport returns a default [http.Transport] which can be used if +// [http.DefaultTransport] has been overwritten. +func BaseTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/vendor/cloud.google.com/go/bigtable/CHANGES.md b/vendor/cloud.google.com/go/bigtable/CHANGES.md index 05b4291c9fd32..d102ec70e16be 100644 --- a/vendor/cloud.google.com/go/bigtable/CHANGES.md +++ b/vendor/cloud.google.com/go/bigtable/CHANGES.md @@ -1,5 +1,32 @@ # Changes +## [1.31.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.30.0...bigtable/v1.31.0) (2024-08-28) + + +### Features + +* **bigtable:** Using new auth library ([#10766](https://github.com/googleapis/google-cloud-go/issues/10766)) ([8daf304](https://github.com/googleapis/google-cloud-go/commit/8daf304faf3808690996ad3a45d2890b107a0939)) + + +### Performance Improvements + +* **bigtable:** Use RecvMsg instead of Recv ([#10674](https://github.com/googleapis/google-cloud-go/issues/10674)) ([7e4fbc5](https://github.com/googleapis/google-cloud-go/commit/7e4fbc5612441c59bfaa1e5b9bbd06e3387b5c02)) + +## [1.30.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.29.0...bigtable/v1.30.0) (2024-08-26) + + +### Features + +* **bigtable:** Add MarshalJSON to allow clients to get a stringified version of the protobuf ([#10679](https://github.com/googleapis/google-cloud-go/issues/10679)) ([663f399](https://github.com/googleapis/google-cloud-go/commit/663f3996ced66c312c8202535574d3ffcb72d283)) +* **bigtable:** Add nil checks to Equal method ([#10758](https://github.com/googleapis/google-cloud-go/issues/10758)) ([f1aad7f](https://github.com/googleapis/google-cloud-go/commit/f1aad7f3a05a959d0dd973e026026391deda7657)) +* **bigtable:** Add UpdateFamily to allow updating a family type ([#10759](https://github.com/googleapis/google-cloud-go/issues/10759)) 
([ec0cbb2](https://github.com/googleapis/google-cloud-go/commit/ec0cbb20ba42b7ef03688a06dc0a380e9b27e394)) +* **bigtable:** Update go version for conformance tests ([#10743](https://github.com/googleapis/google-cloud-go/issues/10743)) ([74cf45e](https://github.com/googleapis/google-cloud-go/commit/74cf45efe7dc6f74cadac3f015a705f8dbf69622)) + + +### Bug Fixes + +* **bigtable:** Use new auth library ([#10670](https://github.com/googleapis/google-cloud-go/issues/10670)) ([fab520d](https://github.com/googleapis/google-cloud-go/commit/fab520d226340bbf1aedc001dcb7384651e075a0)) + ## [1.29.0](https://github.com/googleapis/google-cloud-go/compare/bigtable/v1.28.0...bigtable/v1.29.0) (2024-08-09) diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go index 8dd9f929056fa..69f57f0efa7b8 100644 --- a/vendor/cloud.google.com/go/bigtable/admin.go +++ b/vendor/cloud.google.com/go/bigtable/admin.go @@ -667,41 +667,82 @@ func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, return ti, nil } -type gcPolicySettings struct { +type updateFamilyOption struct { ignoreWarnings bool } -// GCPolicyOption is the interface to change GC policy settings +// GCPolicyOption is deprecated, kept for backwards compatibility, use UpdateFamilyOption in new code type GCPolicyOption interface { - apply(s *gcPolicySettings) + apply(s *updateFamilyOption) } +// UpdateFamilyOption is the interface to update family settings +type UpdateFamilyOption GCPolicyOption + type ignoreWarnings bool -func (w ignoreWarnings) apply(s *gcPolicySettings) { +func (w ignoreWarnings) apply(s *updateFamilyOption) { s.ignoreWarnings = bool(w) } -// IgnoreWarnings returns a gcPolicyOption that ignores safety checks when modifying the column families +// IgnoreWarnings returns an updateFamilyOption that ignores safety checks when modifying the column families func IgnoreWarnings() GCPolicyOption { return ignoreWarnings(true) } -func (ac *AdminClient) setGCPolicy(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error { +// SetGCPolicy specifies which cells in a column family should be garbage collected. +// GC executes opportunistically in the background; table reads may return data +// matching the GC policy. +func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { + return ac.UpdateFamily(ctx, table, family, Family{GCPolicy: policy}) +} + +// SetGCPolicyWithOptions is similar to SetGCPolicy but allows passing options +func (ac *AdminClient) SetGCPolicyWithOptions(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error { + familyOpts := []UpdateFamilyOption{} + for _, opt := range opts { + if opt != nil { + familyOpts = append(familyOpts, opt.(UpdateFamilyOption)) + } + } + return ac.UpdateFamily(ctx, table, family, Family{GCPolicy: policy}, familyOpts...) +} + +// UpdateFamily updates column families' garbage collection policies and value type. 
+func (ac *AdminClient) UpdateFamily(ctx context.Context, table, familyName string, family Family, opts ...UpdateFamilyOption) error { ctx = mergeOutgoingMetadata(ctx, ac.md) prefix := ac.instancePrefix() - s := gcPolicySettings{} + s := updateFamilyOption{} for _, opt := range opts { if opt != nil { opt.apply(&s) } } + + cf := &btapb.ColumnFamily{} + mask := &field_mask.FieldMask{} + if family.GCPolicy != nil { + cf.GcRule = family.GCPolicy.proto() + mask.Paths = append(mask.Paths, "gc_rule") + + } + if family.ValueType != nil { + cf.ValueType = family.ValueType.proto() + mask.Paths = append(mask.Paths, "value_type") + } + + // No update + if len(mask.Paths) == 0 { + return nil + } + req := &btapb.ModifyColumnFamiliesRequest{ Name: prefix + "/tables/" + table, Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ - Id: family, - Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}}, + Id: familyName, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: cf}, + UpdateMask: mask, }}, IgnoreWarnings: s.ignoreWarnings, } @@ -709,18 +750,6 @@ func (ac *AdminClient) setGCPolicy(ctx context.Context, table, family string, po return err } -// SetGCPolicy specifies which cells in a column family should be garbage collected. -// GC executes opportunistically in the background; table reads may return data -// matching the GC policy. -func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { - return ac.SetGCPolicyWithOptions(ctx, table, family, policy) -} - -// SetGCPolicyWithOptions is similar to SetGCPolicy but allows passing options -func (ac *AdminClient) SetGCPolicyWithOptions(ctx context.Context, table, family string, policy GCPolicy, opts ...GCPolicyOption) error { - return ac.setGCPolicy(ctx, table, family, policy, opts...) -} - // DropRowRange permanently deletes a row range from the specified table. func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error { ctx = mergeOutgoingMetadata(ctx, ac.md) diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go index e12035db4e7a8..7a57b41372345 100644 --- a/vendor/cloud.google.com/go/bigtable/bigtable.go +++ b/vendor/cloud.google.com/go/bigtable/bigtable.go @@ -109,6 +109,7 @@ func NewClientWithConfig(ctx context.Context, project, instance string, config C // Allow non-default service account in DirectPath. o = append(o, internaloption.AllowNonDefaultServiceAccount(true)) + o = append(o, internaloption.EnableNewAuthLibrary()) o = append(o, opts...) connPool, err := gtransport.DialPool(ctx, o...) if err != nil { @@ -395,8 +396,10 @@ func (t *Table) readRows(ctx context.Context, arg RowSet, f func(Row) bool, mt * // Ignore error since header is only being used to record builtin metrics // Failure to record metrics should not fail the operation *headerMD, _ = stream.Header() + res := new(btpb.ReadRowsResponse) for { - res, err := stream.Recv() + proto.Reset(res) + err := stream.RecvMsg(res) if err == io.EOF { *trailerMD = stream.Trailer() break @@ -437,7 +440,8 @@ func (t *Table) readRows(ctx context.Context, arg RowSet, f func(Row) bool, mt * // Cancel and drain stream. cancel() for { - if _, err := stream.Recv(); err != nil { + proto.Reset(res) + if err := stream.RecvMsg(res); err != nil { *trailerMD = stream.Trailer() // The stream has ended. We don't return an error // because the caller has intentionally interrupted the scan. 
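For orientation, a minimal usage sketch of the UpdateFamily call added to admin.go above; this is not part of the vendored change, the project, instance, table, and column-family names are placeholders, and only the GC-policy path shown in the diff is exercised.

// Hypothetical example: update only the GC policy of one column family via the
// new AdminClient.UpdateFamily API (all names below are placeholders).
package main

import (
	"context"
	"log"

	"cloud.google.com/go/bigtable"
)

func main() {
	ctx := context.Background()
	admin, err := bigtable.NewAdminClient(ctx, "example-project", "example-instance")
	if err != nil {
		log.Fatalf("NewAdminClient: %v", err)
	}
	defer admin.Close()

	// Only GCPolicy is set on Family, so UpdateFamily builds a field mask that
	// names gc_rule and leaves value_type untouched, as in the diff above.
	fam := bigtable.Family{GCPolicy: bigtable.MaxVersionsPolicy(2)}

	// IgnoreWarnings() satisfies UpdateFamilyOption, which shares the method
	// set of GCPolicyOption per the type definition in the diff.
	if err := admin.UpdateFamily(ctx, "example-table", "stats", fam, bigtable.IgnoreWarnings()); err != nil {
		log.Fatalf("UpdateFamily: %v", err)
	}
}
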
diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go index c53bb2b4dc008..b2da28599ebf7 100644 --- a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go @@ -333,10 +333,15 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu return true }) } else if modify := mod.GetUpdate(); modify != nil { - if _, ok := tbl.families[mod.Id]; !ok { + newcf := newColumnFamily(req.Name+"/columnFamilies/"+mod.Id, 0, modify) + cf, ok := tbl.families[mod.Id] + if !ok { return nil, fmt.Errorf("no such family %q", mod.Id) } - newcf := newColumnFamily(req.Name+"/columnFamilies/"+mod.Id, 0, modify) + if cf.valueType != newcf.valueType { + return nil, status.Errorf(codes.InvalidArgument, "Immutable fields 'value_type' cannot be updated") + } + // assume that we ALWAYS want to replace by the new setting // we may need partial update through tbl.families[mod.Id] = newcf diff --git a/vendor/cloud.google.com/go/bigtable/conformance_test.sh b/vendor/cloud.google.com/go/bigtable/conformance_test.sh index 35a126e2f94d8..bf6f520a6b0c0 100644 --- a/vendor/cloud.google.com/go/bigtable/conformance_test.sh +++ b/vendor/cloud.google.com/go/bigtable/conformance_test.sh @@ -50,10 +50,10 @@ trap cleanup EXIT # Run the conformance tests cd $conformanceTestsHome -# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.20.2 -go install golang.org/dl/go1.20.2@latest -go1.20.2 download -go1.20.2 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log +# Tests in https://github.com/googleapis/cloud-bigtable-clients-test/tree/main/tests can only be run on go1.22.5 +go install golang.org/dl/go1.22.5@latest +go1.22.5 download +go1.22.5 test -v -proxy_addr=:$testProxyPort | tee -a $sponge_log RETURN_CODE=$? echo "exiting with ${RETURN_CODE}" diff --git a/vendor/cloud.google.com/go/bigtable/internal/version.go b/vendor/cloud.google.com/go/bigtable/internal/version.go index a08cb7cabc62d..9c7380dc515f0 100644 --- a/vendor/cloud.google.com/go/bigtable/internal/version.go +++ b/vendor/cloud.google.com/go/bigtable/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.29.0" +const Version = "1.31.0" diff --git a/vendor/cloud.google.com/go/bigtable/type.go b/vendor/cloud.google.com/go/bigtable/type.go index 59f954f081f7b..4d0d8dbf5fe62 100644 --- a/vendor/cloud.google.com/go/bigtable/type.go +++ b/vendor/cloud.google.com/go/bigtable/type.go @@ -16,7 +16,11 @@ limitations under the License. package bigtable -import btapb "cloud.google.com/go/bigtable/admin/apiv2/adminpb" +import ( + btapb "cloud.google.com/go/bigtable/admin/apiv2/adminpb" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) // Type wraps the protobuf representation of a type. See the protobuf definition // for more details on types. @@ -24,6 +28,34 @@ type Type interface { proto() *btapb.Type } +var marshalOptions = protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} +var unmarshalOptions = protojson.UnmarshalOptions{AllowPartial: true} + +// MarshalJSON returns the string representation of the Type protobuf. +func MarshalJSON(t Type) ([]byte, error) { + return marshalOptions.Marshal(t.proto()) +} + +// UnmarshalJSON returns a Type object from json bytes. 
+func UnmarshalJSON(data []byte) (Type, error) { + result := &btapb.Type{} + if err := unmarshalOptions.Unmarshal(data, result); err != nil { + return nil, err + } + return ProtoToType(result), nil +} + +// Equal compares Type objects. +func Equal(a, b Type) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + return proto.Equal(a.proto(), b.proto()) +} + type unknown[T interface{}] struct { wrapped *T } @@ -205,6 +237,8 @@ func ProtoToType(pb *btapb.Type) Type { return int64ProtoToType(t.Int64Type) case *btapb.Type_BytesType: return bytesProtoToType(t.BytesType) + case *btapb.Type_StringType: + return stringProtoToType(t.StringType) case *btapb.Type_AggregateType: return aggregateProtoToType(t.AggregateType) default: @@ -229,6 +263,23 @@ func bytesProtoToType(b *btapb.Type_Bytes) BytesType { return BytesType{Encoding: bytesEncodingProtoToType(b.Encoding)} } +func stringEncodingProtoToType(se *btapb.Type_String_Encoding) StringEncoding { + if se == nil { + return unknown[btapb.Type_String_Encoding]{wrapped: se} + } + + switch se.Encoding.(type) { + case *btapb.Type_String_Encoding_Utf8Raw_: + return StringUtf8Encoding{} + default: + return unknown[btapb.Type_String_Encoding]{wrapped: se} + } +} + +func stringProtoToType(s *btapb.Type_String) Type { + return StringType{Encoding: stringEncodingProtoToType(s.Encoding)} +} + func int64EncodingProtoToEncoding(ie *btapb.Type_Int64_Encoding) Int64Encoding { if ie == nil { return unknown[btapb.Type_Int64_Encoding]{wrapped: ie} @@ -246,7 +297,7 @@ func int64ProtoToType(i *btapb.Type_Int64) Type { return Int64Type{Encoding: int64EncodingProtoToEncoding(i.Encoding)} } -func aggregateProtoToType(agg *btapb.Type_Aggregate) Type { +func aggregateProtoToType(agg *btapb.Type_Aggregate) AggregateType { if agg == nil { return AggregateType{Input: nil, Aggregator: unknownAggregator{wrapped: agg}} } diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2793c6..811b6a0d06c77 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f8917e31..2e53f01230090 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. 
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index 89f757b1161c3..498a15a5fcd52 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,13 @@ # Changes +## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12) + + +### Bug Fixes + +* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) + ## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20) diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md index 64aeb66a13021..88176bd51dfac 100644 --- a/vendor/cloud.google.com/go/pubsub/CHANGES.md +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -1,5 +1,20 @@ # Changes +## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.42.0...pubsub/v1.43.0) (2024-09-09) + + +### Features + +* **pubsub:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18)) +* **pubsub:** Allow trace extraction from protobuf message ([#10827](https://github.com/googleapis/google-cloud-go/issues/10827)) ([caa826c](https://github.com/googleapis/google-cloud-go/commit/caa826cea826473ebf4c806b57b0c3b0a2f0f365)) + + +### Bug Fixes + +* **pubsub:** Add attributes before startSpan ([#10800](https://github.com/googleapis/google-cloud-go/issues/10800)) ([48addbf](https://github.com/googleapis/google-cloud-go/commit/48addbff725ee2bb226ce0ab926415c27fd4ffad)) +* **pubsub:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04)) +* **pubsub:** Close grpc streams on retry ([#10624](https://github.com/googleapis/google-cloud-go/issues/10624)) ([79a0e11](https://github.com/googleapis/google-cloud-go/commit/79a0e118c88190cbe1b56250a75b67bd98b0d7f2)) + ## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/pubsub/v1.41.0...pubsub/v1.42.0) (2024-08-19) diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go new file mode 100644 index 0000000000000..c7a04ffb92e96 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/auxiliary_go123.go @@ -0,0 +1,56 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +//go:build go1.23 + +package pubsub + +import ( + "iter" + + pubsubpb "cloud.google.com/go/pubsub/apiv1/pubsubpb" + "github.com/googleapis/gax-go/v2/iterator" +) + +// All returns an iterator. 
If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SchemaIterator) All() iter.Seq2[*pubsubpb.Schema, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SnapshotIterator) All() iter.Seq2[*pubsubpb.Snapshot, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *StringIterator) All() iter.Seq2[string, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *SubscriptionIterator) All() iter.Seq2[*pubsubpb.Subscription, error] { + return iterator.RangeAdapter(it.Next) +} + +// All returns an iterator. If an error is returned by the iterator, the +// iterator will stop after that iteration. +func (it *TopicIterator) All() iter.Seq2[*pubsubpb.Topic, error] { + return iterator.RangeAdapter(it.Next) +} diff --git a/vendor/cloud.google.com/go/pubsub/internal/version.go b/vendor/cloud.google.com/go/pubsub/internal/version.go index f37b860085c8d..e5b2de09172f5 100644 --- a/vendor/cloud.google.com/go/pubsub/internal/version.go +++ b/vendor/cloud.google.com/go/pubsub/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.42.0" +const Version = "1.43.0" diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go index 9f60359040fbb..4f791fa6c06ae 100644 --- a/vendor/cloud.google.com/go/pubsub/iterator.go +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -335,14 +335,17 @@ func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) { if m.Attributes != nil { ctx = propagation.TraceContext{}.Extract(ctx, newMessageCarrier(m)) } - attr := getSubscriberOpts(it.projectID, it.subID, m) - _, span := startSpan(ctx, subscribeSpanName, it.subID, attr...) - span.SetAttributes( - attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery), - attribute.String(ackIDAttribute, ackID), - semconv.MessagingBatchMessageCount(len(msgs)), - semconv.CodeFunction("receive"), + opts := getSubscriberOpts(it.projectID, it.subID, m) + opts = append( + opts, + trace.WithAttributes( + attribute.Bool(eosAttribute, it.enableExactlyOnceDelivery), + attribute.String(ackIDAttribute, ackID), + semconv.MessagingBatchMessageCount(len(msgs)), + semconv.CodeFunction("receive"), + ), ) + _, span := startSpan(ctx, subscribeSpanName, it.subID, opts...) // Always store the subscribe span, even if sampling isn't enabled. // This is useful since we need to propagate the sampling flag // to the callback in Receive, so traces have an unbroken sampling decision. @@ -658,11 +661,16 @@ func (it *messageIterator) sendAck(m map[string]*AckResult) { // Create the single ack span for this request, and for each // message, add Subscribe<->Ack links. opts := getCommonOptions(it.projectID, it.subID) - opts = append(opts, trace.WithLinks(links...)) + opts = append( + opts, + trace.WithLinks(links...), + trace.WithAttributes( + semconv.MessagingBatchMessageCount(len(ackIDs)), + semconv.CodeFunction("sendAck"), + ), + ) _, ackSpan := startSpan(context.Background(), ackSpanName, it.subID, opts...) 
defer ackSpan.End() - ackSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)), - semconv.CodeFunction("sendAck")) if ackSpan.SpanContext().IsSampled() { for _, s := range subscribeSpans { s.AddLink(trace.Link{ @@ -740,16 +748,25 @@ func (it *messageIterator) sendModAck(m map[string]*AckResult, deadline time.Dur // Create the single modack/nack span for this request, and for each // message, add Subscribe<->Modack links. opts := getCommonOptions(it.projectID, it.subID) - opts = append(opts, trace.WithLinks(links...)) - _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...) - defer mSpan.End() + opts = append( + opts, + trace.WithLinks(links...), + trace.WithAttributes( + semconv.MessagingBatchMessageCount(len(ackIDs)), + semconv.CodeFunction("sendModAck"), + ), + ) if !isNack { - mSpan.SetAttributes( - semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)), - attribute.Bool(receiptModackAttribute, isReceipt)) + opts = append( + opts, + trace.WithAttributes( + semconv.MessagingGCPPubsubMessageAckDeadline(int(deadlineSec)), + attribute.Bool(receiptModackAttribute, isReceipt), + ), + ) } - mSpan.SetAttributes(semconv.MessagingBatchMessageCount(len(ackIDs)), - semconv.CodeFunction("sendModAck")) + _, mSpan := startSpan(context.Background(), spanName, it.subID, opts...) + defer mSpan.End() if mSpan.SpanContext().IsSampled() { for _, s := range subscribeSpans { s.AddLink(trace.Link{ diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go index c5ea8f510af82..231e5a64ada9d 100644 --- a/vendor/cloud.google.com/go/pubsub/pullstream.go +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -31,8 +31,9 @@ import ( // the stream on a retryable error. type pullStream struct { ctx context.Context - open func() (pb.Subscriber_StreamingPullClient, error) - cancel context.CancelFunc + cancel context.CancelFunc // cancel function of the context above + open func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) + close context.CancelFunc // cancel function to close down the currently open stream mu sync.Mutex spc *pb.Subscriber_StreamingPullClient @@ -50,8 +51,9 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName return &pullStream{ ctx: ctx, cancel: cancel, - open: func() (pb.Subscriber_StreamingPullClient, error) { - spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + open: func() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) { + sctx, close := context.WithCancel(ctx) + spc, err := streamingPull(sctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) if err == nil { recordStat(ctx, StreamRequestCount, 1) streamAckDeadline := int32(maxDurationPerLeaseExtension / time.Second) @@ -69,9 +71,10 @@ func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName }) } if err != nil { - return nil, err + close() + return nil, nil, err } - return spc, nil + return spc, close, nil }, } } @@ -100,29 +103,33 @@ func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber if spc != s.spc { return s.spc, nil } + // we are about to open a new stream: if necessary, make sure the previous one is closed + if s.close != nil { + s.close() + } // Either this is the very first call on this stream (s.spc == nil), or we have a valid // retry request. Either way, open a new stream. 
// The lock is held here for a long time, but it doesn't matter because no callers could get // anything done anyway. s.spc = new(pb.Subscriber_StreamingPullClient) - *s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent. + *s.spc, s.close, s.err = s.openWithRetry() // Any error from openWithRetry is permanent. return s.spc, s.err } -func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) { +func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, context.CancelFunc, error) { r := defaultRetryer{} for { recordStat(s.ctx, StreamOpenCount, 1) - spc, err := s.open() + spc, close, err := s.open() bo, shouldRetry := r.Retry(err) if err != nil && shouldRetry { recordStat(s.ctx, StreamRetryCount, 1) if err := gax.Sleep(s.ctx, bo); err != nil { - return nil, err + return nil, nil, err } continue } - return spc, err + return spc, close, err } } diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go index 1991fa7f03ce8..cd6216e2f829a 100644 --- a/vendor/cloud.google.com/go/pubsub/topic.go +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -748,8 +748,8 @@ func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { var createSpan trace.Span if t.enableTracing { opts := getPublishSpanAttributes(t.c.projectID, t.ID(), msg) + opts = append(opts, trace.WithAttributes(semconv.CodeFunction("Publish"))) ctx, createSpan = startSpan(ctx, createSpanName, t.ID(), opts...) - createSpan.SetAttributes(semconv.CodeFunction("Publish")) } ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name)) if err != nil { @@ -973,8 +973,14 @@ func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) opts := getCommonOptions(projectID, topicID) // Add link to publish RPC span of createSpan(s). opts = append(opts, trace.WithLinks(links...)) + opts = append( + opts, + trace.WithAttributes( + semconv.MessagingBatchMessageCount(numMsgs), + semconv.CodeFunction("publishMessageBundle"), + ), + ) ctx, pSpan = startSpan(ctx, publishRPCSpanName, topicID, opts...) - pSpan.SetAttributes(semconv.MessagingBatchMessageCount(numMsgs), semconv.CodeFunction("publishMessageBundle")) defer pSpan.End() // Add the reverse link to createSpan(s) of publish RPC span. diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go index 1d41e9d894221..51112bb50f5f3 100644 --- a/vendor/cloud.google.com/go/pubsub/trace.go +++ b/vendor/cloud.google.com/go/pubsub/trace.go @@ -20,6 +20,7 @@ import ( "log" "sync" + pb "cloud.google.com/go/pubsub/apiv1/pubsubpb" "cloud.google.com/go/pubsub/internal" "go.opencensus.io/stats" "go.opencensus.io/stats/view" @@ -273,33 +274,42 @@ func tracer() trace.Tracer { var _ propagation.TextMapCarrier = (*messageCarrier)(nil) -// messageCarrier injects and extracts traces from a pubsub.Message. +// messageCarrier injects and extracts traces from pubsub.Message attributes. type messageCarrier struct { - msg *Message + attributes map[string]string } const googclientPrefix string = "googclient_" // newMessageCarrier creates a new PubsubMessageCarrier. func newMessageCarrier(msg *Message) messageCarrier { - return messageCarrier{msg: msg} + return messageCarrier{attributes: msg.Attributes} +} + +// NewMessageCarrierFromPB creates a propagation.TextMapCarrier that can be used to extract the trace +// context from a protobuf PubsubMessage. 
+// +// Example: +// ctx = propagation.TraceContext{}.Extract(ctx, pubsub.NewMessageCarrierFromPB(msg)) +func NewMessageCarrierFromPB(msg *pb.PubsubMessage) propagation.TextMapCarrier { + return messageCarrier{attributes: msg.Attributes} } // Get retrieves a single value for a given key. func (c messageCarrier) Get(key string) string { - return c.msg.Attributes[googclientPrefix+key] + return c.attributes[googclientPrefix+key] } // Set sets an attribute. func (c messageCarrier) Set(key, val string) { - c.msg.Attributes[googclientPrefix+key] = val + c.attributes[googclientPrefix+key] = val } // Keys returns a slice of all keys in the carrier. func (c messageCarrier) Keys() []string { i := 0 - out := make([]string, len(c.msg.Attributes)) - for k := range c.msg.Attributes { + out := make([]string, len(c.attributes)) + for k := range c.attributes { out[i] = k i++ } diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go index 0b702e5d0612e..7299227a3d952 100644 --- a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go @@ -7,7 +7,6 @@ package v3 import ( - _ "github.com/cncf/xds/go/xds/annotations/v3" v3 "github.com/cncf/xds/go/xds/type/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -84,26 +83,23 @@ var file_xds_type_matcher_v3_cel_proto_rawDesc = []byte{ 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, - 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, - 0x78, 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x60, 0xd2, 0xc6, 0xa4, 0xe1, - 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, - 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, - 
0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, + 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78, + 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x58, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, + 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go index f53a4ee947829..4393bb7e29201 100644 --- a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go @@ -7,7 +7,6 @@ package v3 import ( - _ "github.com/cncf/xds/go/xds/annotations/v3" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -65,18 +64,16 @@ var file_xds_type_matcher_v3_http_inputs_proto_rawDesc = []byte{ 0x0a, 0x25, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, - 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, - 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1d, 0x0a, - 0x1b, 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, - 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x67, 0xd2, 0xc6, - 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, - 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 
0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, - 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x22, 0x1d, 0x0a, 0x1b, + 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x65, + 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x5f, 0x0a, 0x1e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48, + 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, + 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go index f88e67e681fc0..d94b03b55958f 100644 --- a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go +++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go @@ -7,7 +7,6 @@ package v3 import ( - _ "github.com/cncf/xds/go/xds/annotations/v3" v3 "github.com/cncf/xds/go/xds/core/v3" _ "github.com/envoyproxy/protoc-gen-validate/validate" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -698,149 +697,146 @@ var file_xds_type_matcher_v3_matcher_proto_rawDesc = []byte{ 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, - 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x80, 0x10, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, - 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, - 0x0b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, - 0x6d, 0x61, 0x74, 
0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xf6, 0x0f, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0c, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, - 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, - 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, - 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, - 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, - 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x12, 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 
0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, - 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, - 0x52, 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x1a, 0x91, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, 0x07, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 
0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, + 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, - 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, - 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x1a, 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, - 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, - 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, - 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, - 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x48, 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, - 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 
0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, - 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, - 0x04, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, - 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, - 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, - 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, - 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, - 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, - 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x5d, - 0x0a, 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, - 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, + 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 
0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, - 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, - 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, + 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72, 0x65, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x6e, + 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, + 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, + 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x78, 0x64, + 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, + 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, + 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x46, 0x0a, + 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, - 0x61, 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x3a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, - 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, - 0x9a, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, - 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, + 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x08, 0xfa, + 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xa9, 0x04, + 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x41, 0x0a, + 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, + 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, + 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, + 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0d, + 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 
0x12, 0x5d, 0x0a, + 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, + 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x3a, 0x08, 0xd2, 0xc6, 0xa4, 0xe1, - 0x06, 0x02, 0x08, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, - 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, - 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, + 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x46, 0x0a, 0x0c, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, + 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, + 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, + 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x9a, + 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c, 0x0a, 0x08, 0x4d, 0x61, 0x70, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09, 0x74, 0x72, 0x65, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 
0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, + 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/fsouza/fake-gcs-server/LICENSE b/vendor/github.com/fsouza/fake-gcs-server/LICENSE index 529faa468606e..a619aaecef9d1 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/LICENSE +++ b/vendor/github.com/fsouza/fake-gcs-server/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2017-2019, Francisco Souza +Copyright (c) Francisco Souza All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go index e2fa2ad3716ee..4026f1a4a0deb 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/bucket.go @@ -6,49 +6,161 @@ package fakestorage import ( "encoding/json" + "errors" + "fmt" + "io" "net/http" + "regexp" + "github.com/fsouza/fake-gcs-server/internal/backend" "github.com/gorilla/mux" ) +var bucketRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$`) + // CreateBucket creates a bucket inside the server, so any API calls that // require the bucket name will recognize this bucket. // // If the bucket already exists, this method does nothing. +// +// Deprecated: use CreateBucketWithOpts. func (s *Server) CreateBucket(name string) { - s.mtx.Lock() - defer s.mtx.Unlock() - err := s.backend.CreateBucket(name) + err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: false, DefaultEventBasedHold: false}) + if err != nil { + panic(err) + } +} + +func (s *Server) updateBucket(r *http.Request) jsonResponse { + bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] + attrsToUpdate := getBucketAttrsToUpdate(r.Body) + err := s.backend.UpdateBucket(bucketName, attrsToUpdate) if err != nil { panic(err) } + return jsonResponse{} +} + +func getBucketAttrsToUpdate(body io.ReadCloser) backend.BucketAttrs { + var data struct { + DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` + Versioning bucketVersioning `json:"versioning,omitempty"` + } + err := json.NewDecoder(body).Decode(&data) + if err != nil { + panic(err) + } + attrsToUpdate := backend.BucketAttrs{ + DefaultEventBasedHold: data.DefaultEventBasedHold, + VersioningEnabled: data.Versioning.Enabled, + } + return attrsToUpdate +} + +// CreateBucketOpts defines the properties of a bucket you can create with +// CreateBucketWithOpts. +type CreateBucketOpts struct { + Name string + VersioningEnabled bool + DefaultEventBasedHold bool +} + +// CreateBucketWithOpts creates a bucket inside the server, so any API calls that +// require the bucket name will recognize this bucket. Use CreateBucketOpts to +// customize the options for this bucket +// +// If the underlying backend returns an error, this method panics. 
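To show how the options above are meant to be used, here is a minimal Go sketch. It assumes the package's existing fakestorage.NewServer constructor and Server.Stop method (defined in server.go, not visible in this hunk); the bucket name is illustrative only.

package main

import (
	"fmt"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

func main() {
	// Start an in-memory fake GCS server with no preloaded objects.
	server := fakestorage.NewServer(nil)
	defer server.Stop()

	// Create a versioned bucket using the options introduced above.
	server.CreateBucketWithOpts(fakestorage.CreateBucketOpts{
		Name:                  "example-bucket",
		VersioningEnabled:     true,
		DefaultEventBasedHold: false,
	})

	fmt.Println("created bucket example-bucket")
}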
+func (s *Server) CreateBucketWithOpts(opts CreateBucketOpts) { + err := s.backend.CreateBucket(opts.Name, backend.BucketAttrs{VersioningEnabled: opts.VersioningEnabled, DefaultEventBasedHold: opts.DefaultEventBasedHold}) + if err != nil { + panic(err) + } +} + +func (s *Server) createBucketByPost(r *http.Request) jsonResponse { + // Minimal version of Bucket from google.golang.org/api/storage/v1 + + var data struct { + Name string `json:"name,omitempty"` + Versioning *bucketVersioning `json:"versioning,omitempty"` + DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` + } + + // Read the bucket props from the request body JSON + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&data); err != nil { + return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest} + } + name := data.Name + versioning := false + if data.Versioning != nil { + versioning = data.Versioning.Enabled + } + defaultEventBasedHold := data.DefaultEventBasedHold + if err := validateBucketName(name); err != nil { + return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest} + } + + _, err := s.backend.GetBucket(name) + if err == nil { + return jsonResponse{ + errorMessage: fmt.Sprintf( + "A Cloud Storage bucket named '%s' already exists. "+ + "Try another name. Bucket names must be globally unique "+ + "across all Google Cloud projects, including those "+ + "outside of your organization.", name), + status: http.StatusConflict, + } + } + + // Create the named bucket + if err := s.backend.CreateBucket(name, backend.BucketAttrs{VersioningEnabled: versioning, DefaultEventBasedHold: defaultEventBasedHold}); err != nil { + return jsonResponse{errorMessage: err.Error()} + } + + // Return the created bucket: + bucket, err := s.backend.GetBucket(name) + if err != nil { + return jsonResponse{errorMessage: err.Error()} + } + return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)} } -func (s *Server) listBuckets(w http.ResponseWriter, r *http.Request) { - s.mtx.RLock() - defer s.mtx.RUnlock() +func (s *Server) listBuckets(r *http.Request) jsonResponse { + buckets, err := s.backend.ListBuckets() + if err != nil { + return jsonResponse{errorMessage: err.Error()} + } + return jsonResponse{data: newListBucketsResponse(buckets, s.options.BucketsLocation)} +} - bucketNames, err := s.backend.ListBuckets() +func (s *Server) getBucket(r *http.Request) jsonResponse { + bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] + bucket, err := s.backend.GetBucket(bucketName) + if err != nil { + return jsonResponse{status: http.StatusNotFound} + } + return jsonResponse{data: newBucketResponse(bucket, s.options.BucketsLocation)} +} + +func (s *Server) deleteBucket(r *http.Request) jsonResponse { + bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] + err := s.backend.DeleteBucket(bucketName) + if err == backend.BucketNotFound { + return jsonResponse{status: http.StatusNotFound} + } + if err == backend.BucketNotEmpty { + return jsonResponse{status: http.StatusPreconditionFailed, errorMessage: err.Error()} + } if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return jsonResponse{status: http.StatusInternalServerError, errorMessage: err.Error()} } - resp := newListBucketsResponse(bucketNames) - json.NewEncoder(w).Encode(resp) + return jsonResponse{} } -func (s *Server) getBucket(w http.ResponseWriter, r *http.Request) { - bucketName := mux.Vars(r)["bucketName"] - s.mtx.RLock() - defer s.mtx.RUnlock() - encoder := 
json.NewEncoder(w) - if err := s.backend.GetBucket(bucketName); err != nil { - w.WriteHeader(http.StatusNotFound) - err := newErrorResponse(http.StatusNotFound, "Not found", nil) - encoder.Encode(err) - return +func validateBucketName(bucketName string) error { + if !bucketRegexp.MatchString(bucketName) { + return errors.New("invalid bucket name") } - resp := newBucketResponse(bucketName) - w.WriteHeader(http.StatusOK) - encoder.Encode(resp) + return nil } diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go new file mode 100644 index 0000000000000..a57d154279a5e --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/config.go @@ -0,0 +1,30 @@ +package fakestorage + +import ( + "encoding/json" + "net/http" +) + +func (s *Server) updateServerConfig(r *http.Request) jsonResponse { + var configOptions struct { + ExternalUrl string `json:"externalUrl,omitempty"` + PublicHost string `json:"publicHost,omitempty"` + } + err := json.NewDecoder(r.Body).Decode(&configOptions) + if err != nil { + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: "Update server config payload can not be parsed.", + } + } + + if configOptions.ExternalUrl != "" { + s.externalURL = configOptions.ExternalUrl + } + + if configOptions.PublicHost != "" { + s.publicHost = configOptions.PublicHost + } + + return jsonResponse{status: http.StatusOK} +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go new file mode 100644 index 0000000000000..99e8ce7d4ccab --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/json_response.go @@ -0,0 +1,84 @@ +package fakestorage + +import ( + "encoding/json" + "errors" + "net/http" + "os" + "syscall" + + "github.com/fsouza/fake-gcs-server/internal/backend" +) + +type jsonResponse struct { + status int + header http.Header + data any + errorMessage string +} + +type jsonHandler = func(r *http.Request) jsonResponse + +func jsonToHTTPHandler(h jsonHandler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + resp := h(r) + w.Header().Set("Content-Type", "application/json") + for name, values := range resp.header { + for _, value := range values { + w.Header().Add(name, value) + } + } + + status := resp.getStatus() + var data any + if status > 399 { + data = newErrorResponse(status, resp.getErrorMessage(status), resp.getErrorList(status)) + } else { + data = resp.data + } + + w.WriteHeader(status) + json.NewEncoder(w).Encode(data) + } +} + +func (r *jsonResponse) getStatus() int { + if r.status > 0 { + return r.status + } + if r.errorMessage != "" { + return http.StatusInternalServerError + } + return http.StatusOK +} + +func (r *jsonResponse) getErrorMessage(status int) string { + if r.errorMessage != "" { + return r.errorMessage + } + return http.StatusText(status) +} + +func (r *jsonResponse) getErrorList(status int) []apiError { + if status == http.StatusOK { + return nil + } else { + return []apiError{{ + Domain: "global", + Reason: http.StatusText(status), + Message: r.getErrorMessage(status), + }} + } +} + +func errToJsonResponse(err error) jsonResponse { + status := 0 + var pathError *os.PathError + if errors.As(err, &pathError) && pathError.Err == syscall.ENAMETOOLONG { + status = http.StatusBadRequest + } + if err == backend.PreConditionFailed { + status = http.StatusPreconditionFailed + } + return 
jsonResponse{errorMessage: err.Error(), status: status} +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go index afaa2efeac76a..b228c787ae682 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/mux_tranport.go @@ -7,16 +7,14 @@ package fakestorage import ( "net/http" "net/http/httptest" - - "github.com/gorilla/mux" ) type muxTransport struct { - router *mux.Router + handler http.Handler } func (t *muxTransport) RoundTrip(r *http.Request) (*http.Response, error) { w := httptest.NewRecorder() - t.router.ServeHTTP(w, r) + t.handler.ServeHTTP(w, r) return w.Result(), nil } diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go index bc1d472f36e30..b229a452331e6 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/object.go @@ -5,84 +5,357 @@ package fakestorage import ( + "bytes" + "compress/gzip" "encoding/json" + "encoding/xml" + "errors" "fmt" + "io" "net/http" + "slices" "sort" "strconv" "strings" + "time" + "cloud.google.com/go/storage" "github.com/fsouza/fake-gcs-server/internal/backend" + "github.com/fsouza/fake-gcs-server/internal/notification" "github.com/gorilla/mux" ) -// Object represents the object that is stored within the fake server. -type Object struct { - BucketName string `json:"-"` - Name string `json:"name"` - Content []byte `json:"-"` +var errInvalidGeneration = errors.New("invalid generation ID") + +// ObjectAttrs returns only the meta-data about an object without its contents. +type ObjectAttrs struct { + BucketName string + Name string + Size int64 + ContentType string + ContentEncoding string + ContentDisposition string + CacheControl string // Crc32c checksum of Content. calculated by server when it's upload methods are used. 
- Crc32c string `json:"crc32c,omitempty"` - Md5Hash string `json:"md5hash,omitempty"` + Crc32c string + Md5Hash string + Etag string + ACL []storage.ACLRule + // Dates and generation can be manually injected, so you can do assertions on them, + // or let us fill these fields for you + Created time.Time + Updated time.Time + Deleted time.Time + CustomTime time.Time + Generation int64 + Metadata map[string]string } -func (o *Object) id() string { +func (o *ObjectAttrs) id() string { return o.BucketName + "/" + o.Name } -type objectList []Object +type jsonObject struct { + BucketName string `json:"bucket"` + Name string `json:"name"` + Size int64 `json:"size,string"` + ContentType string `json:"contentType"` + ContentEncoding string `json:"contentEncoding"` + ContentDisposition string `json:"contentDisposition"` + Crc32c string `json:"crc32c,omitempty"` + Md5Hash string `json:"md5Hash,omitempty"` + Etag string `json:"etag,omitempty"` + ACL []aclRule `json:"acl,omitempty"` + Created time.Time `json:"created,omitempty"` + Updated time.Time `json:"updated,omitempty"` + Deleted time.Time `json:"deleted,omitempty"` + CustomTime time.Time `json:"customTime,omitempty"` + Generation int64 `json:"generation,omitempty,string"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// MarshalJSON for ObjectAttrs to use ACLRule instead of storage.ACLRule +func (o ObjectAttrs) MarshalJSON() ([]byte, error) { + temp := jsonObject{ + BucketName: o.BucketName, + Name: o.Name, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: o.Size, + Crc32c: o.Crc32c, + Md5Hash: o.Md5Hash, + Etag: o.Etag, + Created: o.Created, + Updated: o.Updated, + Deleted: o.Deleted, + CustomTime: o.CustomTime, + Generation: o.Generation, + Metadata: o.Metadata, + } + temp.ACL = make([]aclRule, len(o.ACL)) + for i, ACL := range o.ACL { + temp.ACL[i] = aclRule(ACL) + } + return json.Marshal(temp) +} + +// UnmarshalJSON for ObjectAttrs to use ACLRule instead of storage.ACLRule +func (o *ObjectAttrs) UnmarshalJSON(data []byte) error { + var temp jsonObject + if err := json.Unmarshal(data, &temp); err != nil { + return err + } + o.BucketName = temp.BucketName + o.Name = temp.Name + o.ContentType = temp.ContentType + o.ContentEncoding = temp.ContentEncoding + o.ContentDisposition = temp.ContentDisposition + o.Size = temp.Size + o.Crc32c = temp.Crc32c + o.Md5Hash = temp.Md5Hash + o.Etag = temp.Etag + o.Created = temp.Created + o.Updated = temp.Updated + o.Deleted = temp.Deleted + o.Generation = temp.Generation + o.Metadata = temp.Metadata + o.CustomTime = temp.CustomTime + o.ACL = make([]storage.ACLRule, len(temp.ACL)) + for i, ACL := range temp.ACL { + o.ACL[i] = storage.ACLRule(ACL) + } + + return nil +} + +// Object represents an object that is stored within the fake server. The +// content of this type is stored is buffered, i.e. it's stored in memory. +// Use StreamingObject to stream the content from a reader, e.g a file. +type Object struct { + ObjectAttrs + Content []byte `json:"-"` +} + +type noopSeekCloser struct { + io.ReadSeeker +} + +func (n noopSeekCloser) Close() error { + return nil +} + +func (o Object) StreamingObject() StreamingObject { + return StreamingObject{ + ObjectAttrs: o.ObjectAttrs, + Content: noopSeekCloser{bytes.NewReader(o.Content)}, + } +} + +// StreamingObject is the streaming version of Object. 
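To make the buffered versus streaming split concrete, here is a minimal sketch: a helper that builds a buffered Object and stores it through the streaming path. It assumes the fakestorage import and a running Server as in the earlier sketch; the bucket and object names are made up.

// storeExampleObject builds a buffered Object and stores it through the
// streaming path. CreateObjectStreaming returns an error instead of
// panicking the way CreateObject does.
func storeExampleObject(server *fakestorage.Server) error {
	obj := fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{
			BucketName:  "example-bucket",
			Name:        "notes/hello.txt",
			ContentType: "text/plain",
		},
		Content: []byte("hello, world"),
	}
	// StreamingObject wraps the in-memory content in a no-op ReadSeekCloser
	// so the buffered object satisfies the streaming interface.
	return server.CreateObjectStreaming(obj.StreamingObject())
}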
+type StreamingObject struct { + ObjectAttrs + Content io.ReadSeekCloser `json:"-"` +} + +func (o *StreamingObject) Close() error { + if o != nil && o.Content != nil { + return o.Content.Close() + } + return nil +} + +func (o *StreamingObject) BufferedObject() (Object, error) { + data, err := io.ReadAll(o.Content) + return Object{ + ObjectAttrs: o.ObjectAttrs, + Content: data, + }, err +} + +// ACLRule is an alias of storage.ACLRule to have custom JSON marshal +type aclRule storage.ACLRule + +// ProjectTeam is an alias of storage.ProjectTeam to have custom JSON marshal +type projectTeam storage.ProjectTeam + +// MarshalJSON for ACLRule to customize field names +func (acl aclRule) MarshalJSON() ([]byte, error) { + temp := struct { + Entity storage.ACLEntity `json:"entity"` + EntityID string `json:"entityId"` + Role storage.ACLRole `json:"role"` + Domain string `json:"domain"` + Email string `json:"email"` + ProjectTeam *projectTeam `json:"projectTeam"` + }{ + Entity: acl.Entity, + EntityID: acl.EntityID, + Role: acl.Role, + Domain: acl.Domain, + Email: acl.Email, + ProjectTeam: (*projectTeam)(acl.ProjectTeam), + } + return json.Marshal(temp) +} -func (o objectList) Len() int { - return len(o) +// UnmarshalJSON for ACLRule to customize field names +func (acl *aclRule) UnmarshalJSON(data []byte) error { + temp := struct { + Entity storage.ACLEntity `json:"entity"` + EntityID string `json:"entityId"` + Role storage.ACLRole `json:"role"` + Domain string `json:"domain"` + Email string `json:"email"` + ProjectTeam *projectTeam `json:"projectTeam"` + }{} + if err := json.Unmarshal(data, &temp); err != nil { + return err + } + acl.Entity = temp.Entity + acl.EntityID = temp.EntityID + acl.Role = temp.Role + acl.Domain = temp.Domain + acl.Email = temp.Email + acl.ProjectTeam = (*storage.ProjectTeam)(temp.ProjectTeam) + return nil } -func (o objectList) Less(i int, j int) bool { - return o[i].Name < o[j].Name +// MarshalJSON for ProjectTeam to customize field names +func (team projectTeam) MarshalJSON() ([]byte, error) { + temp := struct { + ProjectNumber string `json:"projectNumber"` + Team string `json:"team"` + }{ + ProjectNumber: team.ProjectNumber, + Team: team.Team, + } + return json.Marshal(temp) } -func (o *objectList) Swap(i int, j int) { - d := *o - d[i], d[j] = d[j], d[i] +// UnmarshalJSON for ProjectTeam to customize field names +func (team *projectTeam) UnmarshalJSON(data []byte) error { + temp := struct { + ProjectNumber string `json:"projectNumber"` + Team string `json:"team"` + }{} + if err := json.Unmarshal(data, &temp); err != nil { + return err + } + team.ProjectNumber = temp.ProjectNumber + team.Team = temp.Team + return nil } -// CreateObject stores the given object internally. +// CreateObject is the non-streaming version of CreateObjectStreaming. // -// If the bucket within the object doesn't exist, it also creates it. If the -// object already exists, it overrides the object. +// In addition to streaming, CreateObjectStreaming returns an error instead of +// panicking when an error occurs. func (s *Server) CreateObject(obj Object) { - s.mtx.Lock() - defer s.mtx.Unlock() - err := s.createObject(obj) + err := s.CreateObjectStreaming(obj.StreamingObject()) if err != nil { panic(err) } } -func (s *Server) createObject(obj Object) error { - return s.backend.CreateObject(toBackendObjects([]Object{obj})[0]) +// CreateObjectStreaming stores the given object internally. +// +// If the bucket within the object doesn't exist, it also creates it. 
If the +// object already exists, it overwrites the object. +func (s *Server) CreateObjectStreaming(obj StreamingObject) error { + obj, err := s.createObject(obj, backend.NoConditions{}) + if err != nil { + return err + } + obj.Close() + return nil +} + +func (s *Server) createObject(obj StreamingObject, conditions backend.Conditions) (StreamingObject, error) { + oldBackendObj, err := s.backend.GetObject(obj.BucketName, obj.Name) + // Calling Close before checking err is okay on objects, and the object + // may need to be closed whether or not there's an error. + defer oldBackendObj.Close() //lint:ignore SA5001 // see above + + prevVersionExisted := err == nil + + // The caller is responsible for closing the created object. + newBackendObj, err := s.backend.CreateObject(toBackendObjects([]StreamingObject{obj})[0], conditions) + if err != nil { + return StreamingObject{}, err + } + + var newObjEventAttr map[string]string + if prevVersionExisted { + newObjEventAttr = map[string]string{ + "overwroteGeneration": strconv.FormatInt(oldBackendObj.Generation, 10), + } + + oldObjEventAttr := map[string]string{ + "overwrittenByGeneration": strconv.FormatInt(newBackendObj.Generation, 10), + } + + bucket, _ := s.backend.GetBucket(obj.BucketName) + if bucket.VersioningEnabled { + s.eventManager.Trigger(&oldBackendObj, notification.EventArchive, oldObjEventAttr) + } else { + s.eventManager.Trigger(&oldBackendObj, notification.EventDelete, oldObjEventAttr) + } + } + + newObj := fromBackendObjects([]backend.StreamingObject{newBackendObj})[0] + s.eventManager.Trigger(&newBackendObj, notification.EventFinalize, newObjEventAttr) + return newObj, nil +} + +type ListOptions struct { + Prefix string + Delimiter string + Versions bool + StartOffset string + EndOffset string + IncludeTrailingDelimiter bool } // ListObjects returns a sorted list of objects that match the given criteria, // or an error if the bucket doesn't exist. -func (s *Server) ListObjects(bucketName, prefix, delimiter string) ([]Object, []string, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - backendObjects, err := s.backend.ListObjects(bucketName) +// +// Deprecated: use ListObjectsWithOptions. 
+func (s *Server) ListObjects(bucketName, prefix, delimiter string, versions bool) ([]ObjectAttrs, []string, error) { + return s.ListObjectsWithOptions(bucketName, ListOptions{ + Prefix: prefix, + Delimiter: delimiter, + Versions: versions, + }) +} + +func (s *Server) ListObjectsWithOptions(bucketName string, options ListOptions) ([]ObjectAttrs, []string, error) { + backendObjects, err := s.backend.ListObjects(bucketName, options.Prefix, options.Versions) if err != nil { return nil, nil, err } - objects := fromBackendObjects(backendObjects) - olist := objectList(objects) - sort.Sort(&olist) - var respObjects []Object + objects := fromBackendObjectsAttrs(backendObjects) + slices.SortFunc(objects, func(left, right ObjectAttrs) int { + return strings.Compare(left.Name, right.Name) + }) + var respObjects []ObjectAttrs prefixes := make(map[string]bool) - for _, obj := range olist { - if strings.HasPrefix(obj.Name, prefix) { - objName := strings.Replace(obj.Name, prefix, "", 1) - delimPos := strings.Index(objName, delimiter) - if delimiter != "" && delimPos > -1 { - prefixes[obj.Name[:len(prefix)+delimPos+1]] = true - } else { + for _, obj := range objects { + if !strings.HasPrefix(obj.Name, options.Prefix) { + continue + } + objName := strings.Replace(obj.Name, options.Prefix, "", 1) + delimPos := strings.Index(objName, options.Delimiter) + if options.Delimiter != "" && delimPos > -1 { + prefix := obj.Name[:len(options.Prefix)+delimPos+1] + if isInOffset(prefix, options.StartOffset, options.EndOffset) { + prefixes[prefix] = true + } + if options.IncludeTrailingDelimiter && obj.Name == prefix { + respObjects = append(respObjects, obj) + } + } else { + if isInOffset(obj.Name, options.StartOffset, options.EndOffset) { respObjects = append(respObjects, obj) } } @@ -95,143 +368,818 @@ func (s *Server) ListObjects(bucketName, prefix, delimiter string) ([]Object, [] return respObjects, respPrefixes, nil } -func toBackendObjects(objects []Object) []backend.Object { - backendObjects := []backend.Object{} +func isInOffset(name, startOffset, endOffset string) bool { + if endOffset != "" && startOffset != "" { + return strings.Compare(name, endOffset) < 0 && strings.Compare(name, startOffset) >= 0 + } else if endOffset != "" { + return strings.Compare(name, endOffset) < 0 + } else if startOffset != "" { + return strings.Compare(name, startOffset) >= 0 + } else { + return true + } +} + +func getCurrentIfZero(date time.Time) time.Time { + if date.IsZero() { + return time.Now() + } + return date +} + +func toBackendObjects(objects []StreamingObject) []backend.StreamingObject { + backendObjects := make([]backend.StreamingObject, 0, len(objects)) for _, o := range objects { - backendObjects = append(backendObjects, backend.Object{ - BucketName: o.BucketName, - Name: o.Name, - Content: o.Content, - Crc32c: o.Crc32c, - Md5Hash: o.Md5Hash, + backendObjects = append(backendObjects, backend.StreamingObject{ + ObjectAttrs: backend.ObjectAttrs{ + BucketName: o.BucketName, + Name: o.Name, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + CacheControl: o.CacheControl, + ACL: o.ACL, + Created: getCurrentIfZero(o.Created).Format(timestampFormat), + Deleted: o.Deleted.Format(timestampFormat), + Updated: getCurrentIfZero(o.Updated).Format(timestampFormat), + CustomTime: o.CustomTime.Format(timestampFormat), + Generation: o.Generation, + Metadata: o.Metadata, + }, + Content: o.Content, }) } return backendObjects } -func fromBackendObjects(objects 
[]backend.Object) []Object { - backendObjects := []Object{} +func bufferedObjectsToBackendObjects(objects []Object) []backend.StreamingObject { + backendObjects := make([]backend.StreamingObject, 0, len(objects)) + for _, bufferedObject := range objects { + o := bufferedObject.StreamingObject() + backendObjects = append(backendObjects, backend.StreamingObject{ + ObjectAttrs: backend.ObjectAttrs{ + BucketName: o.BucketName, + Name: o.Name, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + ACL: o.ACL, + Created: getCurrentIfZero(o.Created).Format(timestampFormat), + Deleted: o.Deleted.Format(timestampFormat), + Updated: getCurrentIfZero(o.Updated).Format(timestampFormat), + CustomTime: o.CustomTime.Format(timestampFormat), + Generation: o.Generation, + Metadata: o.Metadata, + Crc32c: o.Crc32c, + Md5Hash: o.Md5Hash, + Size: o.Size, + Etag: o.Etag, + }, + Content: o.Content, + }) + } + return backendObjects +} + +func fromBackendObjects(objects []backend.StreamingObject) []StreamingObject { + backendObjects := make([]StreamingObject, 0, len(objects)) for _, o := range objects { - backendObjects = append(backendObjects, Object{ - BucketName: o.BucketName, - Name: o.Name, - Content: o.Content, - Crc32c: o.Crc32c, - Md5Hash: o.Md5Hash, + backendObjects = append(backendObjects, StreamingObject{ + ObjectAttrs: ObjectAttrs{ + BucketName: o.BucketName, + Name: o.Name, + Size: o.Size, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + CacheControl: o.CacheControl, + Crc32c: o.Crc32c, + Md5Hash: o.Md5Hash, + Etag: o.Etag, + ACL: o.ACL, + Created: convertTimeWithoutError(o.Created), + Deleted: convertTimeWithoutError(o.Deleted), + Updated: convertTimeWithoutError(o.Updated), + CustomTime: convertTimeWithoutError(o.CustomTime), + Generation: o.Generation, + Metadata: o.Metadata, + }, + Content: o.Content, }) } return backendObjects } -// GetObject returns the object with the given name in the given bucket, or an -// error if the object doesn't exist. +func fromBackendObjectsAttrs(objectAttrs []backend.ObjectAttrs) []ObjectAttrs { + oattrs := make([]ObjectAttrs, 0, len(objectAttrs)) + for _, o := range objectAttrs { + oattrs = append(oattrs, ObjectAttrs{ + BucketName: o.BucketName, + Name: o.Name, + Size: o.Size, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + CacheControl: o.CacheControl, + Crc32c: o.Crc32c, + Md5Hash: o.Md5Hash, + Etag: o.Etag, + ACL: o.ACL, + Created: convertTimeWithoutError(o.Created), + Deleted: convertTimeWithoutError(o.Deleted), + Updated: convertTimeWithoutError(o.Updated), + CustomTime: convertTimeWithoutError(o.CustomTime), + Generation: o.Generation, + Metadata: o.Metadata, + }) + } + return oattrs +} + +func convertTimeWithoutError(t string) time.Time { + r, _ := time.Parse(timestampFormat, t) + return r +} + +// GetObject is the non-streaming version of GetObjectStreaming. func (s *Server) GetObject(bucketName, objectName string) (Object, error) { + streamingObject, err := s.GetObjectStreaming(bucketName, objectName) + if err != nil { + return Object{}, err + } + return streamingObject.BufferedObject() +} + +// GetObjectStreaming returns the object with the given name in the given +// bucket, or an error if the object doesn't exist. 
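And a sketch of reading an object back through the streaming getter: the helper below fetches the object, closes it when done, and drains its content with io.ReadAll from the standard library. The server, bucket, and object name are carried over from the earlier sketches.

// readNote streams an object's content back from the fake server. The
// caller of GetObjectStreaming owns the returned object and must Close it.
func readNote(server *fakestorage.Server) ([]byte, error) {
	streamed, err := server.GetObjectStreaming("example-bucket", "notes/hello.txt")
	if err != nil {
		return nil, err
	}
	defer streamed.Close()
	return io.ReadAll(streamed.Content)
}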
+func (s *Server) GetObjectStreaming(bucketName, objectName string) (StreamingObject, error) { backendObj, err := s.backend.GetObject(bucketName, objectName) + if err != nil { + return StreamingObject{}, err + } + obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0] + return obj, nil +} + +// GetObjectWithGeneration is the non-streaming version of +// GetObjectWithGenerationStreaming. +func (s *Server) GetObjectWithGeneration(bucketName, objectName string, generation int64) (Object, error) { + streamingObject, err := s.GetObjectWithGenerationStreaming(bucketName, objectName, generation) if err != nil { return Object{}, err } - obj := fromBackendObjects([]backend.Object{backendObj})[0] + return streamingObject.BufferedObject() +} + +// GetObjectWithGenerationStreaming returns the object with the given name and +// given generation ID in the given bucket, or an error if the object doesn't +// exist. +// +// If versioning is enabled, archived versions are considered. +func (s *Server) GetObjectWithGenerationStreaming(bucketName, objectName string, generation int64) (StreamingObject, error) { + backendObj, err := s.backend.GetObjectWithGeneration(bucketName, objectName, generation) + if err != nil { + return StreamingObject{}, err + } + obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0] return obj, nil } -func (s *Server) listObjects(w http.ResponseWriter, r *http.Request) { - bucketName := mux.Vars(r)["bucketName"] - prefix := r.URL.Query().Get("prefix") - delimiter := r.URL.Query().Get("delimiter") - objs, prefixes, err := s.ListObjects(bucketName, prefix, delimiter) - encoder := json.NewEncoder(w) +func (s *Server) objectWithGenerationOnValidGeneration(bucketName, objectName, generationStr string) (StreamingObject, error) { + generation, err := strconv.ParseInt(generationStr, 10, 64) + if err != nil && generationStr != "" { + return StreamingObject{}, errInvalidGeneration + } else if generation > 0 { + return s.GetObjectWithGenerationStreaming(bucketName, objectName, generation) + } + return s.GetObjectStreaming(bucketName, objectName) +} + +func (s *Server) listObjects(r *http.Request) jsonResponse { + bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] + objs, prefixes, err := s.ListObjectsWithOptions(bucketName, ListOptions{ + Prefix: r.URL.Query().Get("prefix"), + Delimiter: r.URL.Query().Get("delimiter"), + Versions: r.URL.Query().Get("versions") == "true", + StartOffset: r.URL.Query().Get("startOffset"), + EndOffset: r.URL.Query().Get("endOffset"), + IncludeTrailingDelimiter: r.URL.Query().Get("includeTrailingDelimiter") == "true", + }) if err != nil { - w.WriteHeader(http.StatusNotFound) - errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil) - encoder.Encode(errResp) - return + return jsonResponse{status: http.StatusNotFound} } - encoder.Encode(newListObjectsResponse(objs, prefixes)) + return jsonResponse{data: newListObjectsResponse(objs, prefixes, s.externalURL)} } -func (s *Server) getObject(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - encoder := json.NewEncoder(w) - obj, err := s.GetObject(vars["bucketName"], vars["objectName"]) +func (s *Server) xmlListObjects(r *http.Request) xmlResponse { + bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] + + opts := ListOptions{ + Prefix: r.URL.Query().Get("prefix"), + Delimiter: r.URL.Query().Get("delimiter"), + Versions: r.URL.Query().Get("versions") == "true", + } + + objs, prefixes, err := s.ListObjectsWithOptions(bucketName, opts) if err != nil { - errResp := 
newErrorResponse(http.StatusNotFound, "Not Found", nil) - w.WriteHeader(http.StatusNotFound) - encoder.Encode(errResp) + return xmlResponse{ + status: http.StatusInternalServerError, + errorMessage: err.Error(), + } + } + + result := ListBucketResult{ + Name: bucketName, + Delimiter: opts.Delimiter, + Prefix: opts.Prefix, + KeyCount: len(objs), + } + + if opts.Delimiter != "" { + for _, prefix := range prefixes { + result.CommonPrefixes = append(result.CommonPrefixes, CommonPrefix{Prefix: prefix}) + } + } + + for _, obj := range objs { + result.Contents = append(result.Contents, Contents{ + Key: obj.Name, + Generation: obj.Generation, + Size: obj.Size, + LastModified: obj.Updated.Format(time.RFC3339), + ETag: ETag{Value: obj.Etag}, + }) + } + + raw, err := xml.Marshal(result) + if err != nil { + return xmlResponse{ + status: http.StatusInternalServerError, + errorMessage: err.Error(), + } + } + + return xmlResponse{ + status: http.StatusOK, + data: []byte(xml.Header + string(raw)), + } +} + +func (s *Server) getObject(w http.ResponseWriter, r *http.Request) { + if alt := r.URL.Query().Get("alt"); alt == "media" || r.Method == http.MethodHead { + s.downloadObject(w, r) return } - w.Header().Set("Accept-Ranges", "bytes") - encoder.Encode(newObjectResponse(obj)) + + handler := jsonToHTTPHandler(func(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + + projection := storage.ProjectionNoACL + if r.URL.Query().Has("projection") { + switch value := strings.ToLower(r.URL.Query().Get("projection")); value { + case "full": + projection = storage.ProjectionFull + case "noacl": + projection = storage.ProjectionNoACL + default: + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: fmt.Sprintf("invalid projection: %q", value), + } + } + } + + obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation")) + // Calling Close before checking err is okay on objects, and the object + // may need to be closed whether or not there's an error. + defer obj.Close() //lint:ignore SA5001 // see above + if err != nil { + statusCode := http.StatusNotFound + var errMessage string + if errors.Is(err, errInvalidGeneration) { + statusCode = http.StatusBadRequest + errMessage = err.Error() + } + return jsonResponse{ + status: statusCode, + errorMessage: errMessage, + } + } + header := make(http.Header) + header.Set("Accept-Ranges", "bytes") + return jsonResponse{ + header: header, + data: newProjectedObjectResponse(obj.ObjectAttrs, s.externalURL, projection), + } + }) + + handler(w, r) } -func (s *Server) deleteObject(w http.ResponseWriter, r *http.Request) { - s.mtx.Lock() - defer s.mtx.Unlock() - vars := mux.Vars(r) - err := s.backend.DeleteObject(vars["bucketName"], vars["objectName"]) +func (s *Server) deleteObject(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"]) + // Calling Close before checking err is okay on objects, and the object + // may need to be closed whether or not there's an error. 
+ defer obj.Close() //lint:ignore SA5001 // see above + if err == nil { + err = s.backend.DeleteObject(vars["bucketName"], vars["objectName"]) + } if err != nil { - errResp := newErrorResponse(http.StatusNotFound, "Not Found", nil) - w.WriteHeader(http.StatusNotFound) - json.NewEncoder(w).Encode(errResp) - return + return jsonResponse{status: http.StatusNotFound} } - w.WriteHeader(http.StatusOK) + bucket, _ := s.backend.GetBucket(obj.BucketName) + backendObj := toBackendObjects([]StreamingObject{obj})[0] + if bucket.VersioningEnabled { + s.eventManager.Trigger(&backendObj, notification.EventArchive, nil) + } else { + s.eventManager.Trigger(&backendObj, notification.EventDelete, nil) + } + return jsonResponse{} } -func (s *Server) rewriteObject(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - obj, err := s.GetObject(vars["sourceBucket"], vars["sourceObject"]) +func (s *Server) listObjectACL(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + + obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"]) if err != nil { - http.Error(w, "not found", http.StatusNotFound) - return + return jsonResponse{status: http.StatusNotFound} + } + defer obj.Close() + + return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)} +} + +func (s *Server) setObjectACL(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + + obj, err := s.GetObjectStreaming(vars["bucketName"], vars["objectName"]) + if err != nil { + return jsonResponse{status: http.StatusNotFound} + } + defer obj.Close() + + var data struct { + Entity string + Role string + } + + decoder := json.NewDecoder(r.Body) + if err := decoder.Decode(&data); err != nil { + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: err.Error(), + } + } + + entity := storage.ACLEntity(data.Entity) + role := storage.ACLRole(data.Role) + obj.ACL = []storage.ACLRule{{ + Entity: entity, + Role: role, + }} + + obj, err = s.createObject(obj, backend.NoConditions{}) + if err != nil { + return errToJsonResponse(err) + } + defer obj.Close() + + return jsonResponse{data: newACLListResponse(obj.ObjectAttrs)} +} + +func (s *Server) rewriteObject(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + obj, err := s.objectWithGenerationOnValidGeneration(vars["sourceBucket"], vars["sourceObject"], r.FormValue("sourceGeneration")) + // Calling Close before checking err is okay on objects, and the object + // may need to be closed whether or not there's an error. 
+ defer obj.Close() //lint:ignore SA5001 // see above + if err != nil { + statusCode := http.StatusNotFound + var errMessage string + if errors.Is(err, errInvalidGeneration) { + statusCode = http.StatusBadRequest + errMessage = err.Error() + } + return jsonResponse{errorMessage: errMessage, status: statusCode} + } + + var metadata multipartMetadata + err = json.NewDecoder(r.Body).Decode(&metadata) + if err != nil && err != io.EOF { // The body is optional + return jsonResponse{errorMessage: "Invalid metadata", status: http.StatusBadRequest} + } + + // Only supplied metadata overwrites the new object's metdata + if len(metadata.Metadata) == 0 { + metadata.Metadata = obj.Metadata + } + if metadata.ContentType == "" { + metadata.ContentType = obj.ContentType + } + if metadata.ContentEncoding == "" { + metadata.ContentEncoding = obj.ContentEncoding + } + if metadata.ContentDisposition == "" { + metadata.ContentDisposition = obj.ContentDisposition } + dstBucket := vars["destinationBucket"] - newObject := Object{ - BucketName: dstBucket, - Name: vars["destinationObject"], - Content: append([]byte(nil), obj.Content...), - Crc32c: obj.Crc32c, - Md5Hash: obj.Md5Hash, + newObject := StreamingObject{ + ObjectAttrs: ObjectAttrs{ + BucketName: dstBucket, + Name: vars["destinationObject"], + ACL: obj.ACL, + ContentType: metadata.ContentType, + ContentEncoding: metadata.ContentEncoding, + ContentDisposition: metadata.ContentDisposition, + Metadata: metadata.Metadata, + }, + Content: obj.Content, } - s.CreateObject(newObject) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(newObjectRewriteResponse(newObject)) + + created, err := s.createObject(newObject, backend.NoConditions{}) + if err != nil { + return errToJsonResponse(err) + } + defer created.Close() + + if vars["copyType"] == "copyTo" { + return jsonResponse{data: newObjectResponse(created.ObjectAttrs, s.externalURL)} + } + return jsonResponse{data: newObjectRewriteResponse(created.ObjectAttrs, s.externalURL)} } func (s *Server) downloadObject(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - obj, err := s.GetObject(vars["bucketName"], vars["objectName"]) + vars := unescapeMuxVars(mux.Vars(r)) + obj, err := s.objectWithGenerationOnValidGeneration(vars["bucketName"], vars["objectName"], r.FormValue("generation")) + // Calling Close before checking err is okay on objects, and the object + // may need to be closed whether or not there's an error. + defer obj.Close() //lint:ignore SA5001 // see above if err != nil { - http.Error(w, "not found", http.StatusNotFound) + statusCode := http.StatusNotFound + message := http.StatusText(statusCode) + if errors.Is(err, errInvalidGeneration) { + statusCode = http.StatusBadRequest + message = err.Error() + } + http.Error(w, message, statusCode) return } + + var content io.Reader + content = obj.Content status := http.StatusOK - start, end, content := s.handleRange(obj, r) - if len(content) != len(obj.Content) { + + transcoded := false + ranged := false + start := int64(0) + lastByte := int64(0) + satisfiable := true + contentLength := int64(0) + + handledTranscoding := func() bool { + // This should also be false if the Cache-Control metadata field == "no-transform", + // but we don't currently support that field. 
+ // See https://cloud.google.com/storage/docs/transcoding + + if obj.ContentEncoding == "gzip" && !strings.Contains(r.Header.Get("accept-encoding"), "gzip") { + // GCS will transparently decompress gzipped content, see + // https://cloud.google.com/storage/docs/transcoding + // In this case, any Range header is ignored and the full content is returned. + + // If the content is not a valid gzip file, ignore errors and continue + // without transcoding. Otherwise, return decompressed content. + gzipReader, err := gzip.NewReader(content) + if err == nil { + rawContent, err := io.ReadAll(gzipReader) + if err == nil { + transcoded = true + content = bytes.NewReader(rawContent) + contentLength = int64(len(rawContent)) + obj.Size = contentLength + return true + } + } + } + return false + } + + if !handledTranscoding() { + ranged, start, lastByte, satisfiable = s.handleRange(obj, r) + contentLength = lastByte - start + 1 + } + + if ranged && satisfiable { + _, err = obj.Content.Seek(start, io.SeekStart) + if err != nil { + http.Error(w, "could not seek", http.StatusInternalServerError) + return + } + content = io.LimitReader(obj.Content, contentLength) status = http.StatusPartialContent - w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, end, len(obj.Content))) + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", start, lastByte, obj.Size)) } w.Header().Set("Accept-Ranges", "bytes") - w.Header().Set("Content-Length", strconv.Itoa(len(content))) + w.Header().Set("Content-Length", strconv.FormatInt(contentLength, 10)) + w.Header().Set("X-Goog-Generation", strconv.FormatInt(obj.Generation, 10)) + w.Header().Set("X-Goog-Hash", fmt.Sprintf("crc32c=%s,md5=%s", obj.Crc32c, obj.Md5Hash)) + w.Header().Set("Last-Modified", obj.Updated.Format(http.TimeFormat)) + w.Header().Set("ETag", fmt.Sprintf("%q", obj.Etag)) + for name, value := range obj.Metadata { + w.Header().Set("X-Goog-Meta-"+name, value) + } + w.Header().Set("Access-Control-Allow-Origin", "*") + + if ranged && !satisfiable { + status = http.StatusRequestedRangeNotSatisfiable + content = bytes.NewReader([]byte(fmt.Sprintf(``+ + `InvalidRange`+ + `The requested range cannot be satisfied.`+ + `
%s
`, r.Header.Get("Range")))) + w.Header().Set(contentTypeHeader, "application/xml; charset=UTF-8") + } else { + if obj.ContentType != "" { + w.Header().Set(contentTypeHeader, obj.ContentType) + } + if obj.CacheControl != "" { + w.Header().Set(cacheControlHeader, obj.CacheControl) + } + // If content was transcoded, the underlying encoding was removed so we shouldn't report it. + if obj.ContentEncoding != "" && !transcoded { + w.Header().Set("Content-Encoding", obj.ContentEncoding) + } + if obj.ContentDisposition != "" { + w.Header().Set("Content-Disposition", obj.ContentDisposition) + } + // X-Goog-Stored-Content-Encoding must be set to the original encoding, + // defaulting to "identity" if no encoding was set. + storedContentEncoding := "identity" + if obj.ContentEncoding != "" { + storedContentEncoding = obj.ContentEncoding + } + w.Header().Set("X-Goog-Stored-Content-Encoding", storedContentEncoding) + } + w.WriteHeader(status) if r.Method == http.MethodGet { - w.Write(content) + io.Copy(w, content) } } -func (s *Server) handleRange(obj Object, r *http.Request) (start, end int, content []byte) { - if reqRange := r.Header.Get("Range"); reqRange != "" { - parts := strings.SplitN(reqRange, "=", 2) - if len(parts) == 2 && parts[0] == "bytes" { - rangeParts := strings.SplitN(parts[1], "-", 2) - if len(rangeParts) == 2 { - start, _ = strconv.Atoi(rangeParts[0]) - end, _ = strconv.Atoi(rangeParts[1]) - if end < 1 { - end = len(obj.Content) - } - return start, end, obj.Content[start:end] +func (s *Server) handleRange(obj StreamingObject, r *http.Request) (ranged bool, start int64, lastByte int64, satisfiable bool) { + start, end, err := parseRange(r.Header.Get("Range"), obj.Size) + if err != nil { + // If the range isn't valid, GCS returns all content. + return false, 0, obj.Size - 1, false + } + // GCS is pretty flexible when it comes to invalid ranges. A 416 http + // response is only returned when the range start is beyond the length of + // the content. Otherwise, the range is ignored. + switch { + // Invalid start. Return 416 and NO content. + // Examples: + // Length: 40, Range: bytes=50-60 + // Length: 40, Range: bytes=50- + case start >= obj.Size: + // This IS a ranged request, but it ISN'T satisfiable. + return true, 0, 0, false + // Negative range, ignore range and return all content. + // Examples: + // Length: 40, Range: bytes=30-20 + case end < start: + return false, 0, obj.Size - 1, false + // Return range. Clamp start and end. + // Examples: + // Length: 40, Range: bytes=-100 + // Length: 40, Range: bytes=0-100 + default: + if start < 0 { + start = 0 + } + if end >= obj.Size { + end = obj.Size - 1 + } + return true, start, end, true + } +} + +// parseRange parses the range header and returns the corresponding start and +// end indices in the content. The end index is inclusive. This function +// doesn't validate that the start and end indices fall within the content +// bounds. The content length is only used to handle "suffix length" and +// range-to-end ranges. 
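downloadObject above serves Range requests (and transparently decompresses gzip-encoded objects when the client does not accept gzip): invalid ranges fall back to the full content, and 416 is returned only when the range start lies beyond the object size, as handleRange and the parseRange helper defined immediately after this sketch describe. A small sketch, assuming the example object from earlier, of requesting a byte slice that the handler answers with 206 Partial Content and a matching Content-Range header.

package main

import (
	"context"
	"io"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

// readSlice fetches bytes [offset, offset+length) of an example object,
// which downloadObject serves as a partial response.
func readSlice(srv *fakestorage.Server, offset, length int64) ([]byte, error) {
	r, err := srv.Client().Bucket("demo-bucket").Object("notes.txt").NewRangeReader(context.Background(), offset, length)
	if err != nil {
		return nil, err
	}
	defer r.Close()
	return io.ReadAll(r)
}
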
+func parseRange(rangeHeaderValue string, contentLength int64) (start int64, end int64, err error) { + // For information about the range header, see: + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Range + // https://httpwg.org/specs/rfc7233.html#header.range + // https://httpwg.org/specs/rfc7233.html#byte.ranges + // https://httpwg.org/specs/rfc7233.html#status.416 + // + // = + // + // The following ranges are parsed: + // "bytes=40-50" (range with given start and end) + // "bytes=40-" (range to end of content) + // "bytes=-40" (suffix length, offset from end of string) + // + // The unit MUST be "bytes". + parts := strings.SplitN(rangeHeaderValue, "=", 2) + if len(parts) != 2 { + return 0, 0, fmt.Errorf("expecting `=` in range header, got: %s", rangeHeaderValue) + } + if parts[0] != "bytes" { + return 0, 0, fmt.Errorf("invalid range unit, expecting `bytes`, got: %s", parts[0]) + } + rangeSpec := parts[1] + if len(rangeSpec) == 0 { + return 0, 0, errors.New("empty range") + } + if rangeSpec[0] == '-' { + offsetFromEnd, err := strconv.ParseInt(rangeSpec, 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid suffix length, got: %s", rangeSpec) + } + start = contentLength + offsetFromEnd + end = contentLength - 1 + } else { + rangeParts := strings.SplitN(rangeSpec, "-", 2) + if len(rangeParts) != 2 { + return 0, 0, fmt.Errorf("only one range supported, got: %s", rangeSpec) + } + start, err = strconv.ParseInt(rangeParts[0], 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid range start, got: %s", rangeParts[0]) + } + if rangeParts[1] == "" { + end = contentLength - 1 + } else { + end, err = strconv.ParseInt(rangeParts[1], 10, 64) + if err != nil { + return 0, 0, fmt.Errorf("invalid range end, got: %s", rangeParts[1]) } } } - return 0, 0, obj.Content + return start, end, nil +} + +func (s *Server) patchObject(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + bucketName := vars["bucketName"] + objectName := vars["objectName"] + + type acls struct { + Entity string + Role string + } + + var payload struct { + ContentType string + ContentEncoding string + ContentDisposition string + Metadata map[string]string `json:"metadata"` + CustomTime string + Acl []acls + } + err := json.NewDecoder(r.Body).Decode(&payload) + if err != nil { + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: "Metadata in the request couldn't decode", + } + } + + var attrsToUpdate backend.ObjectAttrs + + attrsToUpdate.ContentType = payload.ContentType + attrsToUpdate.ContentEncoding = payload.ContentEncoding + attrsToUpdate.ContentDisposition = payload.ContentDisposition + attrsToUpdate.Metadata = payload.Metadata + attrsToUpdate.CustomTime = payload.CustomTime + + if len(payload.Acl) > 0 { + attrsToUpdate.ACL = []storage.ACLRule{} + for _, aclData := range payload.Acl { + newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)} + attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl) + } + } + + backendObj, err := s.backend.PatchObject(bucketName, objectName, attrsToUpdate) + if err != nil { + return jsonResponse{ + status: http.StatusNotFound, + errorMessage: "Object not found to be PATCHed", + } + } + defer backendObj.Close() + + s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil) + return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]} +} + +func (s *Server) updateObject(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + 
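patchObject above (and updateObject, which continues below) implement the JSON API's PATCH and PUT object endpoints, merging content type, metadata, custom time and ACLs into the stored attributes. A sketch of a metadata patch driven through the client; the field values and names are placeholders.

package main

import (
	"context"

	"cloud.google.com/go/storage"
	"github.com/fsouza/fake-gcs-server/fakestorage"
)

// patchMetadata issues the PATCH request handled by patchObject and returns
// the updated attributes.
func patchMetadata(ctx context.Context, srv *fakestorage.Server) (*storage.ObjectAttrs, error) {
	return srv.Client().Bucket("demo-bucket").Object("notes.txt").Update(ctx, storage.ObjectAttrsToUpdate{
		ContentType: "text/plain; charset=utf-8",
		Metadata:    map[string]string{"reviewed": "true"},
	})
}
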
bucketName := vars["bucketName"] + objectName := vars["objectName"] + + type acls struct { + Entity string + Role string + } + + var payload struct { + Metadata map[string]string `json:"metadata"` + ContentType string `json:"contentType"` + ContentDisposition string `json:"contentDisposition"` + CustomTime string + Acl []acls + } + err := json.NewDecoder(r.Body).Decode(&payload) + if err != nil { + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: "Metadata in the request couldn't decode", + } + } + + var attrsToUpdate backend.ObjectAttrs + + attrsToUpdate.Metadata = payload.Metadata + attrsToUpdate.CustomTime = payload.CustomTime + attrsToUpdate.ContentType = payload.ContentType + attrsToUpdate.ContentDisposition = payload.ContentDisposition + if len(payload.Acl) > 0 { + attrsToUpdate.ACL = []storage.ACLRule{} + for _, aclData := range payload.Acl { + newAcl := storage.ACLRule{Entity: storage.ACLEntity(aclData.Entity), Role: storage.ACLRole(aclData.Role)} + attrsToUpdate.ACL = append(attrsToUpdate.ACL, newAcl) + } + } + backendObj, err := s.backend.UpdateObject(bucketName, objectName, attrsToUpdate) + if err != nil { + return jsonResponse{ + status: http.StatusNotFound, + errorMessage: "Object not found to be updated", + } + } + defer backendObj.Close() + + s.eventManager.Trigger(&backendObj, notification.EventMetadata, nil) + return jsonResponse{data: fromBackendObjects([]backend.StreamingObject{backendObj})[0]} +} + +func (s *Server) composeObject(r *http.Request) jsonResponse { + vars := unescapeMuxVars(mux.Vars(r)) + bucketName := vars["bucketName"] + destinationObject := vars["destinationObject"] + + var composeRequest struct { + SourceObjects []struct { + Name string + } + Destination struct { + Bucket string + ContentType string + ContentDisposition string + Metadata map[string]string + } + } + + decoder := json.NewDecoder(r.Body) + err := decoder.Decode(&composeRequest) + if err != nil { + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: "Error parsing request body", + } + } + + const maxComposeObjects = 32 + if len(composeRequest.SourceObjects) > maxComposeObjects { + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: fmt.Sprintf("The number of source components provided (%d) exceeds the maximum (%d)", len(composeRequest.SourceObjects), maxComposeObjects), + } + } + + sourceNames := make([]string, 0, len(composeRequest.SourceObjects)) + for _, n := range composeRequest.SourceObjects { + sourceNames = append(sourceNames, n.Name) + } + + backendObj, err := s.backend.ComposeObject(bucketName, sourceNames, destinationObject, composeRequest.Destination.Metadata, composeRequest.Destination.ContentType) + if err != nil { + return jsonResponse{ + status: http.StatusInternalServerError, + errorMessage: "Error running compose", + } + } + defer backendObj.Close() + + obj := fromBackendObjects([]backend.StreamingObject{backendObj})[0] + + s.eventManager.Trigger(&backendObj, notification.EventFinalize, nil) + + return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)} } diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go index 92164cafb1057..f40dcd3fe9dd7 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/response.go @@ -4,73 +4,200 @@ package fakestorage -import "sort" +import ( + "fmt" + "net/url" + "time" + + 
"cloud.google.com/go/storage" + "github.com/fsouza/fake-gcs-server/internal/backend" +) + +const timestampFormat = "2006-01-02T15:04:05.999999Z07:00" + +func formatTime(t time.Time) string { + if t.IsZero() { + return "" + } + return t.Format(timestampFormat) +} type listResponse struct { - Kind string `json:"kind"` - Items []interface{} `json:"items"` - Prefixes []string `json:"prefixes"` + Kind string `json:"kind"` + Items []any `json:"items,omitempty"` + Prefixes []string `json:"prefixes,omitempty"` } -func newListBucketsResponse(bucketNames []string) listResponse { +func newListBucketsResponse(buckets []backend.Bucket, location string) listResponse { resp := listResponse{ Kind: "storage#buckets", - Items: make([]interface{}, len(bucketNames)), + Items: make([]any, len(buckets)), } - sort.Strings(bucketNames) - for i, name := range bucketNames { - resp.Items[i] = newBucketResponse(name) + for i, bucket := range buckets { + resp.Items[i] = newBucketResponse(bucket, location) } return resp } type bucketResponse struct { - Kind string `json:"kind"` - ID string `json:"id"` - Name string `json:"name"` + Kind string `json:"kind"` + ID string `json:"id"` + DefaultEventBasedHold bool `json:"defaultEventBasedHold"` + Name string `json:"name"` + Versioning *bucketVersioning `json:"versioning,omitempty"` + TimeCreated string `json:"timeCreated,omitempty"` + Updated string `json:"updated,omitempty"` + Location string `json:"location,omitempty"` + StorageClass string `json:"storageClass,omitempty"` + ProjectNumber string `json:"projectNumber"` + Metageneration string `json:"metageneration"` + Etag string `json:"etag"` + LocationType string `json:"locationType"` } -func newBucketResponse(bucketName string) bucketResponse { +type bucketVersioning struct { + Enabled bool `json:"enabled"` +} + +func newBucketResponse(bucket backend.Bucket, location string) bucketResponse { return bucketResponse{ - Kind: "storage#bucket", - ID: bucketName, - Name: bucketName, + Kind: "storage#bucket", + ID: bucket.Name, + Name: bucket.Name, + DefaultEventBasedHold: bucket.DefaultEventBasedHold, + Versioning: &bucketVersioning{bucket.VersioningEnabled}, + TimeCreated: formatTime(bucket.TimeCreated), + Updated: formatTime(bucket.TimeCreated), // not tracking update times yet, reporting `updated` = `timeCreated` + Location: location, + StorageClass: "STANDARD", + ProjectNumber: "0", + Metageneration: "1", + Etag: "RVRhZw==", + LocationType: "region", } } -func newListObjectsResponse(objs []Object, prefixes []string) listResponse { +func newListObjectsResponse(objs []ObjectAttrs, prefixes []string, externalURL string) listResponse { resp := listResponse{ Kind: "storage#objects", - Items: make([]interface{}, len(objs)), + Items: make([]any, len(objs)), Prefixes: prefixes, } for i, obj := range objs { - resp.Items[i] = newObjectResponse(obj) + resp.Items[i] = newObjectResponse(obj, externalURL) } return resp } +// objectAccessControl is copied from the Google SDK to avoid direct +// dependency. 
+type objectAccessControl struct { + Bucket string `json:"bucket,omitempty"` + Domain string `json:"domain,omitempty"` + Email string `json:"email,omitempty"` + Entity string `json:"entity,omitempty"` + EntityID string `json:"entityId,omitempty"` + Etag string `json:"etag,omitempty"` + Generation int64 `json:"generation,omitempty,string"` + ID string `json:"id,omitempty"` + Kind string `json:"kind,omitempty"` + Object string `json:"object,omitempty"` + ProjectTeam struct { + ProjectNumber string `json:"projectNumber,omitempty"` + Team string `json:"team,omitempty"` + } `json:"projectTeam,omitempty"` + Role string `json:"role,omitempty"` + SelfLink string `json:"selfLink,omitempty"` +} + type objectResponse struct { - Kind string `json:"kind"` - Name string `json:"name"` - ID string `json:"id"` - Bucket string `json:"bucket"` - Size int64 `json:"size,string"` - // Crc32c: CRC32c checksum, same as in google storage client code - Crc32c string `json:"crc32c,omitempty"` - Md5Hash string `json:"md5hash,omitempty"` + Kind string `json:"kind"` + Name string `json:"name"` + ID string `json:"id"` + Bucket string `json:"bucket"` + Size int64 `json:"size,string"` + ContentType string `json:"contentType,omitempty"` + ContentEncoding string `json:"contentEncoding,omitempty"` + ContentDisposition string `json:"contentDisposition,omitempty"` + Crc32c string `json:"crc32c,omitempty"` + ACL []*objectAccessControl `json:"acl,omitempty"` + Md5Hash string `json:"md5Hash,omitempty"` + Etag string `json:"etag,omitempty"` + StorageClass string `json:"storageClass"` + TimeCreated string `json:"timeCreated,omitempty"` + TimeDeleted string `json:"timeDeleted,omitempty"` + TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` + Updated string `json:"updated,omitempty"` + Generation int64 `json:"generation,string"` + CustomTime string `json:"customTime,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + SelfLink string `json:"selfLink,omitempty"` + MediaLink string `json:"mediaLink,omitempty"` + Metageneration string `json:"metageneration,omitempty"` } -func newObjectResponse(obj Object) objectResponse { +func newProjectedObjectResponse(obj ObjectAttrs, externalURL string, projection storage.Projection) objectResponse { + objResponse := newObjectResponse(obj, externalURL) + if projection == storage.ProjectionNoACL { + objResponse.ACL = nil + } + return objResponse +} + +func newObjectResponse(obj ObjectAttrs, externalURL string) objectResponse { + acl := getAccessControlsListFromObject(obj) + return objectResponse{ - Kind: "storage#object", - ID: obj.id(), - Bucket: obj.BucketName, - Name: obj.Name, - Size: int64(len(obj.Content)), - Crc32c: obj.Crc32c, - Md5Hash: obj.Md5Hash, + Kind: "storage#object", + ID: obj.id(), + Bucket: obj.BucketName, + Name: obj.Name, + Size: obj.Size, + ContentType: obj.ContentType, + ContentEncoding: obj.ContentEncoding, + ContentDisposition: obj.ContentDisposition, + Crc32c: obj.Crc32c, + Md5Hash: obj.Md5Hash, + Etag: obj.Etag, + ACL: acl, + StorageClass: "STANDARD", + Metadata: obj.Metadata, + TimeCreated: formatTime(obj.Created), + TimeDeleted: formatTime(obj.Deleted), + TimeStorageClassUpdated: formatTime(obj.Updated), + Updated: formatTime(obj.Updated), + CustomTime: formatTime(obj.CustomTime), + Generation: obj.Generation, + SelfLink: fmt.Sprintf("%s/storage/v1/b/%s/o/%s", externalURL, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)), + MediaLink: fmt.Sprintf("%s/download/storage/v1/b/%s/o/%s?alt=media", externalURL, 
url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)), + Metageneration: "1", + } +} + +type aclListResponse struct { + Items []*objectAccessControl `json:"items"` +} + +func newACLListResponse(obj ObjectAttrs) aclListResponse { + if len(obj.ACL) == 0 { + return aclListResponse{} + } + return aclListResponse{Items: getAccessControlsListFromObject(obj)} +} + +func getAccessControlsListFromObject(obj ObjectAttrs) []*objectAccessControl { + aclItems := make([]*objectAccessControl, len(obj.ACL)) + for idx, aclRule := range obj.ACL { + aclItems[idx] = &objectAccessControl{ + Bucket: obj.BucketName, + Entity: string(aclRule.Entity), + Object: obj.Name, + Role: string(aclRule.Role), + Etag: "RVRhZw==", + Kind: "storage#objectAccessControl", + } } + return aclItems } type rewriteResponse struct { @@ -82,14 +209,14 @@ type rewriteResponse struct { Resource objectResponse `json:"resource"` } -func newObjectRewriteResponse(obj Object) rewriteResponse { +func newObjectRewriteResponse(obj ObjectAttrs, externalURL string) rewriteResponse { return rewriteResponse{ Kind: "storage#rewriteResponse", - TotalBytesRewritten: int64(len(obj.Content)), - ObjectSize: int64(len(obj.Content)), + TotalBytesRewritten: obj.Size, + ObjectSize: obj.Size, Done: true, RewriteToken: "", - Resource: newObjectResponse(obj), + Resource: newObjectResponse(obj, externalURL), } } diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go index 165d9d7ec2ed4..dc6fa9bfd3390 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/server.go @@ -5,30 +5,52 @@ package fakestorage import ( + "bufio" + "bytes" + "compress/gzip" "context" "crypto/tls" + "errors" "fmt" + "io" + "mime" + "mime/multipart" "net" "net/http" "net/http/httptest" + "net/http/httputil" + "net/textproto" + "net/url" + "os" + "path/filepath" + "strings" "sync" "cloud.google.com/go/storage" "github.com/fsouza/fake-gcs-server/internal/backend" + "github.com/fsouza/fake-gcs-server/internal/checksum" + "github.com/fsouza/fake-gcs-server/internal/notification" + "github.com/gorilla/handlers" "github.com/gorilla/mux" + "golang.org/x/oauth2/google" "google.golang.org/api/option" ) +const defaultPublicHost = "storage.googleapis.com" + // Server is the fake server. // // It provides a fake implementation of the Google Cloud Storage API. type Server struct { - backend backend.Storage - uploads map[string]Object - transport http.RoundTripper - ts *httptest.Server - mux *mux.Router - mtx sync.RWMutex + backend backend.Storage + uploads sync.Map + transport http.RoundTripper + ts *httptest.Server + handler http.Handler + options Options + externalURL string + publicHost string + eventManager notification.EventManager } // NewServer creates a new instance of the server, pre-loaded with the given @@ -41,6 +63,8 @@ func NewServer(objects []Object) *Server { } // NewServerWithHostPort creates a new server that listens on a custom host and port +// +// Deprecated: use NewServerWithOptions. func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server, error) { return NewServerWithOptions(Options{ InitialObjects: objects, @@ -49,30 +73,104 @@ func NewServerWithHostPort(objects []Object, host string, port uint16) (*Server, }) } -// Options are used to configure the server on creation +type EventManagerOptions = notification.EventManagerOptions + +// Options are used to configure the server on creation. 
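Earlier in this hunk, newListBucketsResponse and newBucketResponse define the storage#buckets payload (kind, versioning flag, timestamps, location). A sketch of consuming that listing from the client side; the project ID is a placeholder, on the assumption that the fake does not validate it. The Options type documented above continues right after this sketch.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/fsouza/fake-gcs-server/fakestorage"
	"google.golang.org/api/iterator"
)

// printBuckets walks the storage#buckets listing produced by the response
// helpers above.
func printBuckets(srv *fakestorage.Server) {
	it := srv.Client().Buckets(context.Background(), "placeholder-project")
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(attrs.Name, attrs.Created)
	}
}
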
type Options struct { InitialObjects []Object StorageRoot string + Seed string + Scheme string Host string Port uint16 // when set to true, the server will not actually start a TCP listener, // client requests will get processed by an internal mocked transport. NoListener bool + + // Optional external URL, such as https://gcs.127.0.0.1.nip.io:4443 + // Returned in the Location header for resumable uploads + // The "real" value is https://www.googleapis.com, the JSON API + // The default is whatever the server is bound to, such as https://0.0.0.0:4443 + ExternalURL string + + // Optional URL for public access + // An example is "storage.gcs.127.0.0.1.nip.io:4443", which will configure + // the server to serve objects at: + // https://storage.gcs.127.0.0.1.nip.io:4443// + // https://.storage.gcs.127.0.0.1.nip.io:4443>/ + // If unset, the default is "storage.googleapis.com", the XML API + PublicHost string + + // Optional list of headers to add to the CORS header allowlist + // An example is "X-Goog-Meta-Uploader", which will allow a + // custom metadata header named "X-Goog-Meta-Uploader" to be + // sent through the browser + AllowedCORSHeaders []string + + // Destination for writing log. + Writer io.Writer + + // EventOptions contains the events that should be published and the URL + // of the Google cloud function such events should be published to. + EventOptions EventManagerOptions + + // Location used for buckets in the server. + BucketsLocation string + + CertificateLocation string + + PrivateKeyLocation string } -// NewServerWithOptions creates a new server with custom options +// NewServerWithOptions creates a new server configured according to the +// provided options. func NewServerWithOptions(options Options) (*Server, error) { - s, err := newServer(options.InitialObjects, options.StorageRoot) + s, err := newServer(options) if err != nil { return nil, err } + + allowedHeaders := []string{"Content-Type", "Content-Encoding", "Range", "Content-Range"} + allowedHeaders = append(allowedHeaders, options.AllowedCORSHeaders...) 
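The Options fields above cover scheme, external and public URLs, CORS allow-listing, logging, event publication and directory seeding; NewServerWithOptions, whose CORS wiring continues below, consumes them. A sketch of a typical local configuration; every value shown is an example, not a default.

package main

import (
	"log"
	"os"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

// newLocalFake starts a plain-HTTP fake on a fixed port, seeded from a local
// directory that contains one sub-directory per bucket.
func newLocalFake() *fakestorage.Server {
	srv, err := fakestorage.NewServerWithOptions(fakestorage.Options{
		Scheme:             "http",
		Host:               "127.0.0.1",
		Port:               4443,
		PublicHost:         "storage.gcs.127.0.0.1.nip.io:4443",
		Seed:               "./testdata/gcs",
		AllowedCORSHeaders: []string{"X-Goog-Meta-Uploader"},
		Writer:             os.Stdout, // access log destination
	})
	if err != nil {
		log.Fatal(err)
	}
	return srv
}
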
+ + cors := handlers.CORS( + handlers.AllowedMethods([]string{ + http.MethodHead, + http.MethodGet, + http.MethodPost, + http.MethodPut, + http.MethodPatch, + http.MethodDelete, + }), + handlers.AllowedHeaders(allowedHeaders), + handlers.AllowedOrigins([]string{"*"}), + handlers.AllowCredentials(), + handlers.ExposedHeaders([]string{"Location"}), + ) + + s.handler = cors(s.handler) + if options.Writer != nil { + s.handler = handlers.LoggingHandler(options.Writer, s.handler) + } + s.handler = requestCompressHandler(s.handler) + s.transport = &muxTransport{handler: s.handler} + + s.eventManager, err = notification.NewPubsubEventManager(options.EventOptions, options.Writer) + if err != nil { + return nil, err + } + if options.NoListener { - s.setTransportToMux() return s, nil } - s.ts = httptest.NewUnstartedServer(s.mux) + s.ts = httptest.NewUnstartedServer(s.handler) + startFunc := s.ts.StartTLS + if options.Scheme == "http" { + startFunc = s.ts.Start + } + if options.Port != 0 { addr := fmt.Sprintf("%s:%d", options.Host, options.Port) l, err := net.Listen("tcp", addr) @@ -81,64 +179,255 @@ func NewServerWithOptions(options Options) (*Server, error) { } s.ts.Listener.Close() s.ts.Listener = l - s.ts.StartTLS() - } else { - s.ts.StartTLS() } - s.setTransportToAddr(s.ts.Listener.Addr().String()) + if options.CertificateLocation != "" && options.PrivateKeyLocation != "" { + cert, err := tls.LoadX509KeyPair(options.CertificateLocation, options.PrivateKeyLocation) + if err != nil { + return nil, err + } + s.ts.TLS = &tls.Config{Certificates: []tls.Certificate{cert}} + } + startFunc() + return s, nil } -func newServer(objects []Object, storageRoot string) (*Server, error) { - backendObjects := toBackendObjects(objects) +func newServer(options Options) (*Server, error) { + if len(options.InitialObjects) > 0 && options.Seed != "" { + return nil, errors.New("please provide either a seed directory or a list of initial objects") + } + + var backendObjects []backend.StreamingObject + if len(options.InitialObjects) > 0 { + backendObjects = bufferedObjectsToBackendObjects(options.InitialObjects) + } + var backendStorage backend.Storage var err error - if storageRoot != "" { - backendStorage, err = backend.NewStorageFS(backendObjects, storageRoot) + if options.StorageRoot != "" { + backendStorage, err = backend.NewStorageFS(backendObjects, options.StorageRoot) } else { - backendStorage = backend.NewStorageMemory(backendObjects) + backendStorage, err = backend.NewStorageMemory(backendObjects) } if err != nil { return nil, err } + publicHost := options.PublicHost + if publicHost == "" { + publicHost = defaultPublicHost + } + s := Server{ - backend: backendStorage, - uploads: make(map[string]Object), + backend: backendStorage, + uploads: sync.Map{}, + externalURL: options.ExternalURL, + publicHost: publicHost, + options: options, + eventManager: ¬ification.PubsubEventManager{}, } s.buildMuxer() + _, err = s.seed() + if err != nil { + return nil, err + } return &s, nil } -func (s *Server) setTransportToAddr(addr string) { - // #nosec - tlsConfig := tls.Config{InsecureSkipVerify: true} - s.transport = &http.Transport{ - TLSClientConfig: &tlsConfig, - DialTLS: func(string, string) (net.Conn, error) { - return tls.Dial("tcp", addr, &tlsConfig) - }, +func unescapeMuxVars(vars map[string]string) map[string]string { + m := make(map[string]string) + for k, v := range vars { + r, err := url.PathUnescape(v) + if err == nil { + m[k] = r + } else { + m[k] = v + } } + return m } -func (s *Server) setTransportToMux() { - 
s.transport = &muxTransport{router: s.mux} +func (s *Server) buildMuxer() { + const apiPrefix = "/storage/v1" + handler := mux.NewRouter().SkipClean(true).UseEncodedPath() + + // healthcheck + handler.Path("/_internal/healthcheck").Methods(http.MethodGet).HandlerFunc(s.healthcheck) + + routers := []*mux.Router{ + handler.PathPrefix(apiPrefix).Subrouter(), + handler.MatcherFunc(s.publicHostMatcher).PathPrefix(apiPrefix).Subrouter(), + } + + for _, r := range routers { + r.Path("/b").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets)) + r.Path("/b/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listBuckets)) + r.Path("/b").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost)) + r.Path("/b/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.createBucketByPost)) + r.Path("/b/{bucketName}").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.getBucket)) + r.Path("/b/{bucketName}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.updateBucket)) + r.Path("/b/{bucketName}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteBucket)) + r.Path("/b/{bucketName}/o").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects)) + r.Path("/b/{bucketName}/o/").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjects)) + r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPatch).HandlerFunc(jsonToHTTPHandler(s.patchObject)) + r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodGet).HandlerFunc(jsonToHTTPHandler(s.listObjectACL)) + r.Path("/b/{bucketName}/o/{objectName:.+}/acl").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.setObjectACL)) + r.Path("/b/{bucketName}/o/{objectName:.+}/acl/{entity}").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.setObjectACL)) + r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject) + r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteObject)) + r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/{copyType:rewriteTo|copyTo}/b/{destinationBucket}/o/{destinationObject:.+}").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.rewriteObject)) + r.Path("/b/{bucketName}/o/{destinationObject:.+}/compose").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.composeObject)) + r.Path("/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.updateObject)) + } + + // Internal / update server configuration + handler.Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig)) + handler.MatcherFunc(s.publicHostMatcher).Path("/_internal/config").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.updateServerConfig)) + handler.Path("/_internal/reseed").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.reseedServer)) + // Internal - end + + // XML API + xmlApiRouters := []*mux.Router{ + handler.Host(fmt.Sprintf("{bucketName}.%s", s.publicHost)).Subrouter(), + handler.MatcherFunc(s.publicHostMatcher).PathPrefix(`/{bucketName}`).Subrouter(), + } + for _, r := range xmlApiRouters { + r.Path("/").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects)) + r.Path("").Methods(http.MethodGet).HandlerFunc(xmlToHTTPHandler(s.xmlListObjects)) + } + + bucketHost := fmt.Sprintf("{bucketName}.%s", s.publicHost) + handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodGet, 
http.MethodHead).HandlerFunc(s.downloadObject) + handler.Path("/download/storage/v1/b/{bucketName}/o/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject) + handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject)) + handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.insertObject)) + handler.Path("/upload/storage/v1/b/{bucketName}/o").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent)) + handler.Path("/upload/storage/v1/b/{bucketName}/o/").Methods(http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent)) + handler.Path("/upload/resumable/{uploadId}").Methods(http.MethodPut, http.MethodPost).HandlerFunc(jsonToHTTPHandler(s.uploadFileContent)) + + // Batch endpoint + handler.MatcherFunc(s.publicHostMatcher).Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall) + handler.Path("/batch/storage/v1").Methods(http.MethodPost).HandlerFunc(s.handleBatchCall) + + handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject) + handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.downloadObject) + + // Form Uploads + handler.Host(s.publicHost).Path("/{bucketName}").MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject)) + handler.Host(bucketHost).MatcherFunc(matchFormData).Methods(http.MethodPost, http.MethodPut).HandlerFunc(xmlToHTTPHandler(s.insertFormObject)) + + // Signed URLs (upload and download) + handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject)) + handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodGet, http.MethodHead).HandlerFunc(s.getObject) + handler.MatcherFunc(s.publicHostMatcher).Path("/{bucketName}/{objectName:.+}").Methods(http.MethodDelete).HandlerFunc(jsonToHTTPHandler(s.deleteObject)) + handler.Host(bucketHost).Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject)) + handler.Host("{bucketName:.+}").Path("/{objectName:.+}").Methods(http.MethodPost, http.MethodPut).HandlerFunc(jsonToHTTPHandler(s.insertObject)) + + s.handler = handler } -func (s *Server) buildMuxer() { - s.mux = mux.NewRouter() - s.mux.Host("storage.googleapis.com").Path("/{bucketName}/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject) - s.mux.Host("{bucketName}.storage.googleapis.com").Path("/{objectName:.+}").Methods("GET", "HEAD").HandlerFunc(s.downloadObject) - r := s.mux.PathPrefix("/storage/v1").Subrouter() - r.Path("/b").Methods("GET").HandlerFunc(s.listBuckets) - r.Path("/b/{bucketName}").Methods("GET").HandlerFunc(s.getBucket) - r.Path("/b/{bucketName}/o").Methods("GET").HandlerFunc(s.listObjects) - r.Path("/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject) - r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("GET").HandlerFunc(s.getObject) - r.Path("/b/{bucketName}/o/{objectName:.+}").Methods("DELETE").HandlerFunc(s.deleteObject) - r.Path("/b/{sourceBucket}/o/{sourceObject:.+}/rewriteTo/b/{destinationBucket}/o/{destinationObject:.+}").HandlerFunc(s.rewriteObject) - 
s.mux.Path("/download/storage/v1/b/{bucketName}/o/{objectName}").Methods("GET").HandlerFunc(s.downloadObject) - s.mux.Path("/upload/storage/v1/b/{bucketName}/o").Methods("POST").HandlerFunc(s.insertObject) - s.mux.Path("/upload/resumable/{uploadId}").Methods("PUT", "POST").HandlerFunc(s.uploadFileContent) +func (s *Server) seed() ([]backend.StreamingObject, error) { + if s.options.Seed == "" { + return nil, nil + } + + initialObjects, emptyBuckets := generateObjectsFromFiles(s.options.Seed) + + backendObjects := bufferedObjectsToBackendObjects(initialObjects) + + var err error + if s.options.StorageRoot != "" { + s.backend, err = backend.NewStorageFS(backendObjects, s.options.StorageRoot) + } else { + s.backend, err = backend.NewStorageMemory(backendObjects) + } + if err != nil { + return nil, err + } + + for _, bucketName := range emptyBuckets { + s.CreateBucketWithOpts(CreateBucketOpts{Name: bucketName}) + } + return backendObjects, nil +} + +func (s *Server) reseedServer(r *http.Request) jsonResponse { + backendObjects, err := s.seed() + if err != nil { + return errToJsonResponse(err) + } + + return jsonResponse{data: fromBackendObjects(backendObjects)} +} + +func generateObjectsFromFiles(folder string) ([]Object, []string) { + var objects []Object + var emptyBuckets []string + if files, err := os.ReadDir(folder); err == nil { + for _, f := range files { + if !f.IsDir() { + continue + } + bucketName := f.Name() + localBucketPath := filepath.Join(folder, bucketName) + + bucketObjects, err := objectsFromBucket(localBucketPath, bucketName) + if err != nil { + continue + } + + if len(bucketObjects) < 1 { + emptyBuckets = append(emptyBuckets, bucketName) + } + objects = append(objects, bucketObjects...) + } + } + return objects, emptyBuckets +} + +func objectsFromBucket(localBucketPath, bucketName string) ([]Object, error) { + var objects []Object + err := filepath.Walk(localBucketPath, func(path string, info os.FileInfo, _ error) error { + if info.Mode().IsRegular() { + // Rel() should never return error since path always descend from localBucketPath + relPath, _ := filepath.Rel(localBucketPath, path) + objectKey := filepath.ToSlash(relPath) + fileContent, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("could not read file %q: %w", path, err) + } + objects = append(objects, Object{ + ObjectAttrs: ObjectAttrs{ + ACL: []storage.ACLRule{ + { + Entity: "projectOwner-test-project", + Role: "OWNER", + }, + }, + BucketName: bucketName, + Name: objectKey, + ContentType: mime.TypeByExtension(filepath.Ext(path)), + Crc32c: checksum.EncodedCrc32cChecksum(fileContent), + Md5Hash: checksum.EncodedMd5Hash(fileContent), + }, + Content: fileContent, + }) + } + return nil + }) + return objects, err +} + +func (s *Server) healthcheck(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) +} + +// publicHostMatcher matches incoming requests against the currently specified server publicHost. +func (s *Server) publicHostMatcher(r *http.Request, rm *mux.RouteMatch) bool { + if strings.Contains(s.publicHost, ":") || !strings.Contains(r.Host, ":") { + return r.Host == s.publicHost + } + idx := strings.IndexByte(r.Host, ':') + return r.Host[:idx] == s.publicHost } // Stop stops the server, closing all connections. @@ -153,20 +442,136 @@ func (s *Server) Stop() { // URL returns the server URL. func (s *Server) URL() string { + if s.externalURL != "" { + return s.externalURL + } if s.ts != nil { return s.ts.URL } return "" } +// PublicURL returns the server's public download URL. 
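buildMuxer above also registers XML-API style routes keyed on the public host ("/{bucketName}/{objectName}"), and the server's own HTTP client is wired straight to that handler, so no real DNS or TLS setup is needed. A sketch of a raw download through those routes, assuming the example object from earlier; it relies on the PublicURL accessor whose definition follows this sketch.

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/fsouza/fake-gcs-server/fakestorage"
)

// rawDownload fetches an object over the public-host route rather than the
// JSON API, using the fake's own HTTP client so the request is routed
// in-process regardless of the host name.
func rawDownload(srv *fakestorage.Server) {
	resp, err := srv.HTTPClient().Get(fmt.Sprintf("%s/demo-bucket/notes.txt", srv.PublicURL()))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, len(body))
}
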
+func (s *Server) PublicURL() string { + return fmt.Sprintf("%s://%s", s.scheme(), s.publicHost) +} + +func (s *Server) Backend() backend.Storage { + return s.backend +} + +func (s *Server) scheme() string { + if s.options.Scheme == "http" { + return "http" + } + return "https" +} + // HTTPClient returns an HTTP client configured to talk to the server. func (s *Server) HTTPClient() *http.Client { return &http.Client{Transport: s.transport} } +// HTTPHandler returns an HTTP handler that behaves like GCS. +func (s *Server) HTTPHandler() http.Handler { + return s.handler +} + // Client returns a GCS client configured to talk to the server. func (s *Server) Client() *storage.Client { - opt := option.WithHTTPClient(s.HTTPClient()) - client, _ := storage.NewClient(context.Background(), opt) + client, err := storage.NewClient(context.Background(), option.WithHTTPClient(s.HTTPClient()), option.WithCredentials(&google.Credentials{})) + if err != nil { + panic(err) + } return client } + +func (s *Server) handleBatchCall(w http.ResponseWriter, r *http.Request) { + reader, err := r.MultipartReader() + if err != nil { + http.Error(w, "invalid Content-Type header", http.StatusBadRequest) + return + } + + var b bytes.Buffer + mw := multipart.NewWriter(&b) + defer mw.Close() + w.Header().Set("Content-Type", "multipart/mixed; boundary="+mw.Boundary()) + + w.WriteHeader(http.StatusOK) + part, err := reader.NextPart() + for ; err == nil; part, err = reader.NextPart() { + contentID := part.Header.Get("Content-ID") + if contentID == "" { + // missing content ID, skip + continue + } + + partHeaders := textproto.MIMEHeader{} + partHeaders.Set("Content-Type", "application/http") + partHeaders.Set("Content-ID", strings.Replace(contentID, "<", "@,;:\"/[]?= +// (including space). gsutil likes to use `=` in the boundary, but incorrectly +// quotes it using single quotes. +// +// We do exclude \ and " from the regexp because those are not supported by the +// mime package. +// +// This has been reported to gsutil +// (https://github.com/GoogleCloudPlatform/gsutil/issues/1466). If that issue +// ever gets closed, we should be able to get rid of this hack. +var gsutilBoundary = regexp.MustCompile(`boundary='([^']*[()<>@,;:"/\[\]?= ]+[^']*)'`) + type multipartMetadata struct { - Name string `json:"name"` + ContentType string `json:"contentType"` + ContentEncoding string `json:"contentEncoding"` + ContentDisposition string `json:"contentDisposition"` + CacheControl string `json:"cacheControl"` + CustomTime time.Time `json:"customTime,omitempty"` + Name string `json:"name"` + Metadata map[string]string `json:"metadata"` +} + +type contentRange struct { + KnownRange bool // Is the range known, or "*"? + KnownTotal bool // Is the total known, or "*"? 
+ Start int // Start of the range, -1 if unknown + End int // End of the range, -1 if unknown + Total int // Total bytes expected, -1 if unknown } -func (s *Server) insertObject(w http.ResponseWriter, r *http.Request) { - s.mtx.Lock() - defer s.mtx.Unlock() - bucketName := mux.Vars(r)["bucketName"] - if err := s.backend.GetBucket(bucketName); err != nil { - w.WriteHeader(http.StatusNotFound) - err := newErrorResponse(http.StatusNotFound, "Not found", nil) - json.NewEncoder(w).Encode(err) - return +type generationCondition struct { + ifGenerationMatch *int64 + ifGenerationNotMatch *int64 +} + +func (c generationCondition) ConditionsMet(activeGeneration int64) bool { + if c.ifGenerationMatch != nil && *c.ifGenerationMatch != activeGeneration { + return false + } + if c.ifGenerationNotMatch != nil && *c.ifGenerationNotMatch == activeGeneration { + return false + } + return true +} + +func (s *Server) insertObject(r *http.Request) jsonResponse { + bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] + + if _, err := s.backend.GetBucket(bucketName); err != nil { + return jsonResponse{status: http.StatusNotFound} } uploadType := r.URL.Query().Get("uploadType") + if uploadType == "" && r.Header.Get("X-Goog-Upload-Protocol") == uploadTypeResumable { + uploadType = uploadTypeResumable + } + switch uploadType { - case "media": - s.simpleUpload(bucketName, w, r) - case "multipart": - s.multipartUpload(bucketName, w, r) - case "resumable": - s.resumableUpload(bucketName, w, r) + case uploadTypeMedia: + return s.simpleUpload(bucketName, r) + case uploadTypeMultipart: + return s.multipartUpload(bucketName, r) + case uploadTypeResumable: + return s.resumableUpload(bucketName, r) default: - http.Error(w, "invalid uploadType", http.StatusBadRequest) + // Support Signed URL Uploads + if r.URL.Query().Get("X-Goog-Algorithm") != "" { + switch r.Method { + case http.MethodPost: + return s.resumableUpload(bucketName, r) + case http.MethodPut: + return s.signedUpload(bucketName, r) + } + } + return jsonResponse{errorMessage: "invalid uploadType", status: http.StatusBadRequest} } } -func (s *Server) simpleUpload(bucketName string, w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - name := r.URL.Query().Get("name") +func (s *Server) insertFormObject(r *http.Request) xmlResponse { + bucketName := unescapeMuxVars(mux.Vars(r))["bucketName"] + + if err := r.ParseMultipartForm(32 << 20); nil != err { + return xmlResponse{errorMessage: "invalid form", status: http.StatusBadRequest} + } + + // Load metadata + var name string + if keys, ok := r.MultipartForm.Value["key"]; ok { + name = keys[0] + } if name == "" { - http.Error(w, "name is required for simple uploads", http.StatusBadRequest) - return + return xmlResponse{errorMessage: "missing key", status: http.StatusBadRequest} + } + var predefinedACL string + if acls, ok := r.MultipartForm.Value["acl"]; ok { + predefinedACL = acls[0] + } + var contentEncoding string + if contentEncodings, ok := r.MultipartForm.Value["Content-Encoding"]; ok { + contentEncoding = contentEncodings[0] + } + var contentType string + if contentTypes, ok := r.MultipartForm.Value["Content-Type"]; ok { + contentType = contentTypes[0] + } + successActionStatus := http.StatusNoContent + if successActionStatuses, ok := r.MultipartForm.Value["success_action_status"]; ok { + successInt, err := strconv.Atoi(successActionStatuses[0]) + if err != nil { + return xmlResponse{errorMessage: err.Error(), status: http.StatusBadRequest} + } + if successInt != http.StatusOK && successInt != 
http.StatusCreated && successInt != http.StatusNoContent { + return xmlResponse{errorMessage: "invalid success action status", status: http.StatusBadRequest} + } + successActionStatus = successInt + } + metaData := make(map[string]string) + for key := range r.MultipartForm.Value { + lowerKey := strings.ToLower(key) + if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey { + metaData[metaDataKey] = r.MultipartForm.Value[key][0] + } + } + + // Load file + var file *multipart.FileHeader + if files, ok := r.MultipartForm.File["file"]; ok { + file = files[0] + } + if file == nil { + return xmlResponse{errorMessage: "missing file", status: http.StatusBadRequest} } - data, err := ioutil.ReadAll(r.Body) + infile, err := file.Open() if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return xmlResponse{errorMessage: err.Error()} } - obj := Object{BucketName: bucketName, Name: name, Content: data, Crc32c: encodedCrc32cChecksum(data), Md5Hash: encodedMd5Hash(data)} - err = s.createObject(obj) + obj := StreamingObject{ + ObjectAttrs: ObjectAttrs{ + BucketName: bucketName, + Name: name, + ContentType: contentType, + ContentEncoding: contentEncoding, + ACL: getObjectACL(predefinedACL), + Metadata: metaData, + }, + Content: infile, + } + obj, err = s.createObject(obj, backend.NoConditions{}) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return xmlResponse{errorMessage: err.Error()} } - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(obj) + defer obj.Close() + + if successActionStatus == 201 { + objectURI := fmt.Sprintf("%s/%s%s", s.URL(), bucketName, name) + xmlBody := createXmlResponseBody(bucketName, obj.Etag, strings.TrimPrefix(name, "/"), objectURI) + return xmlResponse{status: successActionStatus, data: xmlBody} + } + return xmlResponse{status: successActionStatus} } -var crc32cTable = crc32.MakeTable(crc32.Castagnoli) +func (s *Server) wrapUploadPreconditions(r *http.Request, bucketName string, objectName string) (generationCondition, error) { + result := generationCondition{ + ifGenerationMatch: nil, + ifGenerationNotMatch: nil, + } + ifGenerationMatch := r.URL.Query().Get("ifGenerationMatch") + + if ifGenerationMatch != "" { + gen, err := strconv.ParseInt(ifGenerationMatch, 10, 64) + if err != nil { + return generationCondition{}, err + } + result.ifGenerationMatch = &gen + } + + ifGenerationNotMatch := r.URL.Query().Get("ifGenerationNotMatch") -func crc32cChecksum(content []byte) []byte { - checksummer := crc32.New(crc32cTable) - checksummer.Write(content) - return checksummer.Sum(make([]byte, 0, 4)) + if ifGenerationNotMatch != "" { + gen, err := strconv.ParseInt(ifGenerationNotMatch, 10, 64) + if err != nil { + return generationCondition{}, err + } + result.ifGenerationNotMatch = &gen + } + + return result, nil } -func encodedChecksum(checksum []byte) string { - return base64.StdEncoding.EncodeToString(checksum) +func (s *Server) simpleUpload(bucketName string, r *http.Request) jsonResponse { + defer r.Body.Close() + name := r.URL.Query().Get("name") + predefinedACL := r.URL.Query().Get("predefinedAcl") + contentEncoding := r.URL.Query().Get("contentEncoding") + customTime := r.URL.Query().Get("customTime") + if name == "" { + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: "name is required for simple uploads", + } + } + obj := StreamingObject{ + ObjectAttrs: ObjectAttrs{ + BucketName: bucketName, + Name: name, + ContentType: 
r.Header.Get(contentTypeHeader), + CacheControl: r.Header.Get(cacheControlHeader), + ContentEncoding: contentEncoding, + CustomTime: convertTimeWithoutError(customTime), + ACL: getObjectACL(predefinedACL), + }, + Content: notImplementedSeeker{r.Body}, + } + obj, err := s.createObject(obj, backend.NoConditions{}) + if err != nil { + return errToJsonResponse(err) + } + obj.Close() + return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)} } -func encodedCrc32cChecksum(content []byte) string { - return encodedChecksum(crc32cChecksum(content)) +type notImplementedSeeker struct { + io.ReadCloser } -func md5Hash(b []byte) []byte { - /* #nosec G401 */ - h := md5.New() - h.Write(b) - return h.Sum(nil) +func (s notImplementedSeeker) Seek(offset int64, whence int) (int64, error) { + return 0, errors.New("not implemented") } -func encodedHash(hash []byte) string { - return base64.StdEncoding.EncodeToString(hash) +func (s *Server) signedUpload(bucketName string, r *http.Request) jsonResponse { + defer r.Body.Close() + name := unescapeMuxVars(mux.Vars(r))["objectName"] + predefinedACL := r.URL.Query().Get("predefinedAcl") + contentEncoding := r.URL.Query().Get("contentEncoding") + customTime := r.URL.Query().Get("customTime") + + // Load data from HTTP Headers + if contentEncoding == "" { + contentEncoding = r.Header.Get("Content-Encoding") + } + + metaData := make(map[string]string) + for key := range r.Header { + lowerKey := strings.ToLower(key) + if metaDataKey := strings.TrimPrefix(lowerKey, "x-goog-meta-"); metaDataKey != lowerKey { + metaData[metaDataKey] = r.Header.Get(key) + } + } + + obj := StreamingObject{ + ObjectAttrs: ObjectAttrs{ + BucketName: bucketName, + Name: name, + ContentType: r.Header.Get(contentTypeHeader), + ContentEncoding: contentEncoding, + CustomTime: convertTimeWithoutError(customTime), + ACL: getObjectACL(predefinedACL), + Metadata: metaData, + }, + Content: notImplementedSeeker{r.Body}, + } + obj, err := s.createObject(obj, backend.NoConditions{}) + if err != nil { + return errToJsonResponse(err) + } + obj.Close() + return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)} } -func encodedMd5Hash(content []byte) string { - return encodedHash(md5Hash(content)) +func getObjectACL(predefinedACL string) []storage.ACLRule { + if predefinedACL == "publicRead" { + return []storage.ACLRule{ + { + Entity: "allUsers", + Role: "READER", + }, + } + } + + return []storage.ACLRule{ + { + Entity: "projectOwner-test-project", + Role: "OWNER", + }, + } } -func (s *Server) multipartUpload(bucketName string, w http.ResponseWriter, r *http.Request) { +func (s *Server) multipartUpload(bucketName string, r *http.Request) jsonResponse { defer r.Body.Close() - _, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) + params, err := parseContentTypeParams(r.Header.Get(contentTypeHeader)) if err != nil { - http.Error(w, "invalid Content-Type header", http.StatusBadRequest) - return + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: "invalid Content-Type header", + } } var ( metadata *multipartMetadata content []byte ) + var contentType string reader := multipart.NewReader(r.Body, params["boundary"]) + + var partReaders []io.Reader + part, err := reader.NextPart() for ; err == nil; part, err = reader.NextPart() { if metadata == nil { metadata, err = loadMetadata(part) + contentType = metadata.ContentType } else { + contentType = part.Header.Get(contentTypeHeader) content, err = loadContent(part) + partReaders = 
append(partReaders, bytes.NewReader(content)) } if err != nil { break } } if err != io.EOF { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return jsonResponse{errorMessage: err.Error()} + } + + objName := r.URL.Query().Get("name") + predefinedACL := r.URL.Query().Get("predefinedAcl") + if objName == "" { + objName = metadata.Name } - obj := Object{BucketName: bucketName, Name: metadata.Name, Content: content, Crc32c: encodedCrc32cChecksum(content), Md5Hash: encodedMd5Hash(content)} - err = s.createObject(obj) + + conditions, err := s.wrapUploadPreconditions(r, bucketName, objName) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return jsonResponse{ + status: http.StatusBadRequest, + errorMessage: err.Error(), + } + } + + obj := StreamingObject{ + ObjectAttrs: ObjectAttrs{ + BucketName: bucketName, + Name: objName, + ContentType: contentType, + CacheControl: metadata.CacheControl, + ContentEncoding: metadata.ContentEncoding, + ContentDisposition: metadata.ContentDisposition, + CustomTime: metadata.CustomTime, + ACL: getObjectACL(predefinedACL), + Metadata: metadata.Metadata, + }, + Content: notImplementedSeeker{io.NopCloser(io.MultiReader(partReaders...))}, } - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(obj) + + obj, err = s.createObject(obj, conditions) + if err != nil { + return errToJsonResponse(err) + } + defer obj.Close() + return jsonResponse{data: newObjectResponse(obj.ObjectAttrs, s.externalURL)} } -func (s *Server) resumableUpload(bucketName string, w http.ResponseWriter, r *http.Request) { - objName := r.URL.Query().Get("name") - if objName == "" { - metadata, err := loadMetadata(r.Body) +func parseContentTypeParams(requestContentType string) (map[string]string, error) { + requestContentType = gsutilBoundary.ReplaceAllString(requestContentType, `boundary="$1"`) + _, params, err := mime.ParseMediaType(requestContentType) + return params, err +} + +func (s *Server) resumableUpload(bucketName string, r *http.Request) jsonResponse { + if r.URL.Query().Has("upload_id") { + return s.uploadFileContent(r) + } + predefinedACL := r.URL.Query().Get("predefinedAcl") + contentEncoding := r.URL.Query().Get("contentEncoding") + metadata := new(multipartMetadata) + if r.Body != http.NoBody { + var err error + metadata, err = loadMetadata(r.Body) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return jsonResponse{errorMessage: err.Error()} } + } + objName := r.URL.Query().Get("name") + if objName == "" { objName = metadata.Name } - obj := Object{BucketName: bucketName, Name: objName} + if contentEncoding == "" { + contentEncoding = metadata.ContentEncoding + } + obj := Object{ + ObjectAttrs: ObjectAttrs{ + BucketName: bucketName, + Name: objName, + ContentType: metadata.ContentType, + CacheControl: metadata.CacheControl, + ContentEncoding: contentEncoding, + CustomTime: metadata.CustomTime, + ACL: getObjectACL(predefinedACL), + Metadata: metadata.Metadata, + }, + } uploadID, err := generateUploadID() if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return jsonResponse{errorMessage: err.Error()} + } + s.uploads.Store(uploadID, obj) + header := make(http.Header) + location := fmt.Sprintf( + "%s/upload/storage/v1/b/%s/o?uploadType=resumable&name=%s&upload_id=%s", + s.URL(), + bucketName, + url.PathEscape(objName), + uploadID, + ) + header.Set("Location", location) + if r.Header.Get("X-Goog-Upload-Command") == "start" { + 
header.Set("X-Goog-Upload-URL", location) + header.Set("X-Goog-Upload-Status", "active") + } + return jsonResponse{ + data: newObjectResponse(obj.ObjectAttrs, s.externalURL), + header: header, } - s.uploads[uploadID] = obj - w.Header().Set("Location", s.URL()+"/upload/resumable/"+uploadID) - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(obj) } -func (s *Server) uploadFileContent(w http.ResponseWriter, r *http.Request) { - uploadID := mux.Vars(r)["uploadId"] - s.mtx.Lock() - defer s.mtx.Unlock() - obj, ok := s.uploads[uploadID] +// uploadFileContent accepts a chunk of a resumable upload +// +// A resumable upload is sent in one or more chunks. The request's +// "Content-Range" header is used to determine if more data is expected. +// +// When sending streaming content, the total size is unknown until the stream +// is exhausted. The Go client always sends streaming content. The sequence of +// "Content-Range" headers for 2600-byte content sent in 1000-byte chunks are: +// +// Content-Range: bytes 0-999/* +// Content-Range: bytes 1000-1999/* +// Content-Range: bytes 2000-2599/* +// Content-Range: bytes */2600 +// +// When sending chunked content of a known size, the total size is sent as +// well. The Python client uses this method to upload files and in-memory +// content. The sequence of "Content-Range" headers for the 2600-byte content +// sent in 1000-byte chunks are: +// +// Content-Range: bytes 0-999/2600 +// Content-Range: bytes 1000-1999/2600 +// Content-Range: bytes 2000-2599/2600 +// +// The server collects the content, analyzes the "Content-Range", and returns a +// "308 Permanent Redirect" response if more chunks are expected, and a +// "200 OK" response if the upload is complete (the Go client also accepts a +// "201 Created" response). The "Range" header in the response should be set to +// the size of the content received so far, such as: +// +// Range: bytes 0-2000 +// +// The client (such as the Go client) can send a header "X-Guploader-No-308" if +// it can't process a native "308 Permanent Redirect". The in-process response +// then has a status of "200 OK", with a header "X-Http-Status-Code-Override" +// set to "308". +func (s *Server) uploadFileContent(r *http.Request) jsonResponse { + uploadID := r.URL.Query().Get("upload_id") + rawObj, ok := s.uploads.Load(uploadID) if !ok { - http.Error(w, "upload not found", http.StatusNotFound) - return + return jsonResponse{status: http.StatusNotFound} } + obj := rawObj.(Object) + // TODO: stream upload file content to and from disk (when using the FS + // backend, at least) instead of loading the entire content into memory. content, err := loadContent(r.Body) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return jsonResponse{errorMessage: err.Error()} } commit := true - status := http.StatusCreated - objLength := len(obj.Content) + status := http.StatusOK obj.Content = append(obj.Content, content...) 
- obj.Crc32c = encodedCrc32cChecksum(obj.Content) - obj.Md5Hash = encodedMd5Hash(obj.Content) + obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content) + obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content) + obj.Etag = obj.Md5Hash + contentTypeHeader := r.Header.Get(contentTypeHeader) + if contentTypeHeader != "" { + obj.ContentType = contentTypeHeader + } else { + obj.ContentType = "application/octet-stream" + } + responseHeader := make(http.Header) if contentRange := r.Header.Get("Content-Range"); contentRange != "" { - commit, err = parseRange(contentRange, objLength, len(content), w) + parsed, err := parseContentRange(contentRange) if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return + return jsonResponse{errorMessage: err.Error(), status: http.StatusBadRequest} + } + if parsed.KnownRange { + // Middle of streaming request, or any part of chunked request + responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", parsed.End)) + // Complete if the range covers the known total + commit = parsed.KnownTotal && (parsed.End+1 >= parsed.Total) + } else { + // End of a streaming request + responseHeader.Set("Range", fmt.Sprintf("bytes=0-%d", len(obj.Content))) } } if commit { - delete(s.uploads, uploadID) - err = s.createObject(obj) + s.uploads.Delete(uploadID) + streamingObject, err := s.createObject(obj.StreamingObject(), backend.NoConditions{}) + if err != nil { + return errToJsonResponse(err) + } + defer streamingObject.Close() + obj, err = streamingObject.BufferedObject() if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return + return errToJsonResponse(err) } } else { - status = http.StatusOK - w.Header().Set("X-Http-Status-Code-Override", "308") - s.uploads[uploadID] = obj - } - data, _ := json.Marshal(obj) - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(len(data))) - w.WriteHeader(status) - w.Write(data) + if _, no308 := r.Header["X-Guploader-No-308"]; no308 { + // Go client + responseHeader.Set("X-Http-Status-Code-Override", "308") + } else { + // Python client + status = http.StatusPermanentRedirect + } + s.uploads.Store(uploadID, obj) + } + if r.Header.Get("X-Goog-Upload-Command") == "upload, finalize" { + responseHeader.Set("X-Goog-Upload-Status", "final") + } + return jsonResponse{ + status: status, + data: newObjectResponse(obj.ObjectAttrs, s.externalURL), + header: responseHeader, + } } -func parseRange(r string, objLength, bodyLength int, w http.ResponseWriter) (finished bool, err error) { +// Parse a Content-Range header +// Some possible valid header values: +// +// bytes 0-1023/4096 (first 1024 bytes of a 4096-byte document) +// bytes 1024-2047/* (second 1024 bytes of a streaming document) +// bytes */4096 (The end of 4096 byte streaming document) +// bytes 0-*/* (start and end of a streaming document as sent by nodeJS client lib) +// bytes */* (start and end of a streaming document as sent by the C++ SDK) +func parseContentRange(r string) (parsed contentRange, err error) { invalidErr := fmt.Errorf("invalid Content-Range: %v", r) + + // Require that units == "bytes" const bytesPrefix = "bytes " - var contentLength int if !strings.HasPrefix(r, bytesPrefix) { - return false, invalidErr + return parsed, invalidErr } + + // Split range from total length parts := strings.SplitN(r[len(bytesPrefix):], "/", 2) if len(parts) != 2 { - return false, invalidErr + return parsed, invalidErr } - var rangeStart, rangeEnd int + // Process range if parts[0] == "*" { - rangeStart = objLength 
- rangeEnd = objLength + bodyLength + parsed.Start = -1 + parsed.End = -1 } else { rangeParts := strings.SplitN(parts[0], "-", 2) if len(rangeParts) != 2 { - return false, invalidErr + return parsed, invalidErr } - rangeStart, err = strconv.Atoi(rangeParts[0]) + + parsed.Start, err = strconv.Atoi(rangeParts[0]) if err != nil { - return false, invalidErr + return parsed, invalidErr } - rangeEnd, err = strconv.Atoi(rangeParts[1]) - if err != nil { - return false, invalidErr + + if rangeParts[1] == "*" { + parsed.End = -1 + } else { + parsed.KnownRange = true + parsed.End, err = strconv.Atoi(rangeParts[1]) + if err != nil { + return parsed, invalidErr + } } } - contentLength = objLength + bodyLength - finished = rangeEnd == contentLength - w.Header().Set("Range", fmt.Sprintf("bytes=%d-%d", rangeStart, rangeEnd)) + // Process total length + if parts[1] == "*" { + parsed.Total = -1 + } else { + parsed.KnownTotal = true + parsed.Total, err = strconv.Atoi(parts[1]) + if err != nil { + return parsed, invalidErr + } + } - return finished, nil + return parsed, nil } func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) { @@ -255,7 +639,7 @@ func loadMetadata(rc io.ReadCloser) (*multipartMetadata, error) { func loadContent(rc io.ReadCloser) ([]byte, error) { defer rc.Close() - return ioutil.ReadAll(rc) + return io.ReadAll(rc) } func generateUploadID() (string, error) { diff --git a/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go new file mode 100644 index 0000000000000..50d9661df84c8 --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/fakestorage/xml_response.go @@ -0,0 +1,123 @@ +package fakestorage + +import ( + "encoding/xml" + "net/http" + "strings" +) + +type xmlResponse struct { + status int + header http.Header + data any + errorMessage string +} + +type xmlResponseBody struct { + XMLName xml.Name `xml:"PostResponse"` + Bucket string + Etag struct { + Value string `xml:",innerxml"` + } + Key string + Location string +} + +type ListBucketResult struct { + XMLName xml.Name `xml:"ListBucketResult"` + Name string `xml:"Name"` + CommonPrefixes []CommonPrefix `xml:"CommonPrefixes,omitempty"` + Delimiter string `xml:"Delimiter"` + Prefix string `xml:"Prefix"` + KeyCount int `xml:"KeyCount"` + Contents []Contents `xml:"Contents"` +} + +type Contents struct { + XMLName xml.Name `xml:"Contents"` + Key string `xml:"Key"` + Generation int64 `xml:"Generation"` + LastModified string `xml:"LastModified"` + ETag ETag + Size int64 `xml:"Size"` +} + +type CommonPrefix struct { + Prefix string `xml:"Prefix"` +} + +type ETag struct { + Value string `xml:",innerxml"` +} + +func (e *ETag) Equals(etag string) bool { + trim := func(s string) string { + return strings.TrimPrefix(strings.TrimSuffix(s, "\""), "\"") + } + return trim(e.Value) == trim(etag) +} + +type xmlHandler = func(r *http.Request) xmlResponse + +func xmlToHTTPHandler(h xmlHandler) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + resp := h(r) + w.Header().Set("Content-Type", "application/xml") + for name, values := range resp.header { + for _, value := range values { + w.Header().Add(name, value) + } + } + + status := resp.getStatus() + var data any + if status > 399 { + data = newErrorResponse(status, resp.getErrorMessage(status), nil) + } else { + data = resp.data + } + + w.WriteHeader(status) + + dataBytes, ok := data.([]byte) + if ok { + w.Write(dataBytes) + } else { + xml.NewEncoder(w).Encode(data) + } + } +} 
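The doc comment on parseContentRange earlier in this hunk lists the Content-Range shapes the fake server accepts; the test-style sketch below spells out how each one is expected to parse. It assumes it would sit in package fakestorage next to upload.go, and that the contentRange struct (defined elsewhere in that file, not in this hunk) has exactly the comparable fields used here: Start, End, Total, KnownRange and KnownTotal.

package fakestorage

import "testing"

// TestParseContentRangeForms is an illustrative table covering the documented
// Content-Range shapes; -1 marks an unknown start, end or total.
func TestParseContentRangeForms(t *testing.T) {
	cases := []struct {
		header string
		want   contentRange
	}{
		// first 1024 bytes of a 4096-byte document
		{"bytes 0-1023/4096", contentRange{Start: 0, End: 1023, Total: 4096, KnownRange: true, KnownTotal: true}},
		// second 1024 bytes of a streaming document
		{"bytes 1024-2047/*", contentRange{Start: 1024, End: 2047, Total: -1, KnownRange: true}},
		// the end of a 4096-byte streaming document
		{"bytes */4096", contentRange{Start: -1, End: -1, Total: 4096, KnownTotal: true}},
		// start and end of a streaming document (nodeJS client lib)
		{"bytes 0-*/*", contentRange{Start: 0, End: -1, Total: -1}},
		// start and end of a streaming document (C++ SDK)
		{"bytes */*", contentRange{Start: -1, End: -1, Total: -1}},
	}
	for _, c := range cases {
		got, err := parseContentRange(c.header)
		if err != nil {
			t.Fatalf("%q: unexpected error: %v", c.header, err)
		}
		if got != c.want {
			t.Errorf("%q: got %+v, want %+v", c.header, got, c.want)
		}
	}
}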
+ +func createXmlResponseBody(bucketName, etag, key, location string) []byte { + responseBody := xmlResponseBody{ + Bucket: bucketName, + Etag: struct { + Value string `xml:",innerxml"` + }{etag}, + Location: location, + Key: key, + } + x, err := xml.Marshal(responseBody) + if err != nil { + return nil + } + + return []byte(xml.Header + string(x)) +} + +func (r *xmlResponse) getStatus() int { + if r.status > 0 { + return r.status + } + if r.errorMessage != "" { + return http.StatusInternalServerError + } + return http.StatusOK +} + +func (r *xmlResponse) getErrorMessage(status int) string { + if r.errorMessage != "" { + return r.errorMessage + } + return http.StatusText(status) +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go new file mode 100644 index 0000000000000..e56a7aa7950a5 --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/bucket.go @@ -0,0 +1,22 @@ +// Copyright 2019 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package backend + +import "time" + +// Bucket represents the bucket that is stored within the fake server. +type Bucket struct { + Name string + VersioningEnabled bool + TimeCreated time.Time + DefaultEventBasedHold bool +} + +const bucketMetadataSuffix = ".bucketMetadata" + +type BucketAttrs struct { + DefaultEventBasedHold bool + VersioningEnabled bool +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go index 24b1b2cb9437e..c96867d802df6 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/fs.go @@ -5,129 +5,465 @@ package backend import ( + "bytes" "encoding/json" + "errors" "fmt" - "io/ioutil" + "io" + "io/fs" "net/url" "os" - "path" "path/filepath" "strings" + "sync" + "syscall" + "time" + + "github.com/fsouza/fake-gcs-server/internal/checksum" + "github.com/pkg/xattr" ) -// StorageFS is an implementation of the backend storage that stores data on disk +// storageFS is an implementation of the backend storage that stores data on disk +// // The layout is the following: +// // - rootDir -// |- bucket1 -// \- bucket2 -// |- object1 -// \- object2 +// +// |- bucket1 +// \- bucket2 +// |- object1 +// \- object2 +// // Bucket and object names are url path escaped, so there's no special meaning of forward slashes. -type StorageFS struct { +type storageFS struct { rootDir string + mtx sync.RWMutex + mh metadataHandler } -// NewStorageFS creates an instance of StorageMemory -func NewStorageFS(objects []Object, rootDir string) (Storage, error) { +// NewStorageFS creates an instance of the filesystem-backed storage backend. +func NewStorageFS(objects []StreamingObject, rootDir string) (Storage, error) { if !strings.HasSuffix(rootDir, "/") { rootDir += "/" } - s := &StorageFS{ - rootDir: rootDir, + err := os.MkdirAll(rootDir, 0o700) + if err != nil { + return nil, err } + + var mh metadataHandler = metadataFile{} + // Use xattr for metadata if rootDir supports it. 
+ if xattr.XATTR_SUPPORTED { + xattrHandler := metadataXattr{} + var xerr *xattr.Error + _, err = xattrHandler.read(rootDir) + if err == nil || (errors.As(err, &xerr) && xerr.Err == xattr.ENOATTR) { + mh = xattrHandler + } + } + + s := &storageFS{rootDir: rootDir, mh: mh} for _, o := range objects { - err := s.CreateObject(o) + obj, err := s.CreateObject(o, NoConditions{}) if err != nil { return nil, err } + obj.Close() } return s, nil } -// CreateBucket creates a bucket -func (s *StorageFS) CreateBucket(name string) error { - return os.MkdirAll(filepath.Join(s.rootDir, url.PathEscape(name)), 0700) +// CreateBucket creates a bucket in the fs backend. A bucket is a folder in the +// root directory. +func (s *storageFS) CreateBucket(name string, bucketAttrs BucketAttrs) error { + s.mtx.Lock() + defer s.mtx.Unlock() + return s.createBucket(name, bucketAttrs) +} + +func (s *storageFS) createBucket(name string, bucketAttrs BucketAttrs) error { + if bucketAttrs.VersioningEnabled { + return errors.New("not implemented: fs storage type does not support versioning yet") + } + path := filepath.Join(s.rootDir, url.PathEscape(name)) + err := os.MkdirAll(path, 0o700) + if err != nil { + return err + } + encoded, err := json.Marshal(bucketAttrs) + if err != nil { + return err + } + return writeFile(path+bucketMetadataSuffix, encoded, 0o600) } -// ListBuckets lists buckets -func (s *StorageFS) ListBuckets() ([]string, error) { - infos, err := ioutil.ReadDir(s.rootDir) +// ListBuckets returns a list of buckets from the list of directories in the +// root directory. +func (s *storageFS) ListBuckets() ([]Bucket, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + infos, err := os.ReadDir(s.rootDir) if err != nil { return nil, err } - buckets := []string{} + buckets := []Bucket{} for _, info := range infos { if info.IsDir() { unescaped, err := url.PathUnescape(info.Name()) if err != nil { - return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err) + return nil, fmt.Errorf("failed to unescape object name %s: %w", info.Name(), err) + } + fileInfo, err := info.Info() + if err != nil { + return nil, fmt.Errorf("failed to get file info for %s: %w", info.Name(), err) } - buckets = append(buckets, unescaped) + buckets = append(buckets, Bucket{Name: unescaped, TimeCreated: timespecToTime(createTimeFromFileInfo(fileInfo))}) } } return buckets, nil } -// GetBucket checks if a bucket exists -func (s *StorageFS) GetBucket(name string) error { - _, err := os.Stat(filepath.Join(s.rootDir, url.PathEscape(name))) - return err +func timespecToTime(ts syscall.Timespec) time.Time { + return time.Unix(int64(ts.Sec), int64(ts.Nsec)) } -// CreateObject stores an object -func (s *StorageFS) CreateObject(obj Object) error { - err := s.CreateBucket(obj.BucketName) +func (s *storageFS) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error { + if attrsToUpdate.VersioningEnabled { + return errors.New("not implemented: fs storage type does not support versioning yet") + } + encoded, err := json.Marshal(attrsToUpdate) if err != nil { return err } - encoded, err := json.Marshal(obj) + path := filepath.Join(s.rootDir, url.PathEscape(bucketName)) + return writeFile(path+bucketMetadataSuffix, encoded, 0o600) +} + +// GetBucket returns information about the given bucket, or an error if it +// doesn't exist. 
+func (s *storageFS) GetBucket(name string) (Bucket, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + path := filepath.Join(s.rootDir, url.PathEscape(name)) + dirInfo, err := os.Stat(path) if err != nil { - return err + return Bucket{}, err } - return ioutil.WriteFile(filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), url.PathEscape(obj.Name)), encoded, 0664) + attrs, err := getBucketAttributes(path) + if err != nil { + return Bucket{}, err + } + return Bucket{Name: name, VersioningEnabled: false, TimeCreated: timespecToTime(createTimeFromFileInfo(dirInfo)), DefaultEventBasedHold: attrs.DefaultEventBasedHold}, err } -// ListObjects lists the objects in a given bucket with a given prefix and delimeter -func (s *StorageFS) ListObjects(bucketName string) ([]Object, error) { - infos, err := ioutil.ReadDir(path.Join(s.rootDir, url.PathEscape(bucketName))) +func getBucketAttributes(path string) (BucketAttrs, error) { + content, err := os.ReadFile(path + bucketMetadataSuffix) if err != nil { - return nil, err + if os.IsNotExist(err) { + return BucketAttrs{}, nil + } + return BucketAttrs{}, err } - objects := []Object{} - for _, info := range infos { - unescaped, err := url.PathUnescape(info.Name()) + var attrs BucketAttrs + err = json.Unmarshal(content, &attrs) + if err != nil { + return BucketAttrs{}, err + } + return attrs, nil +} + +// DeleteBucket removes the bucket from the backend. +func (s *storageFS) DeleteBucket(name string) error { + objs, err := s.ListObjects(name, "", false) + if err != nil { + return BucketNotFound + } + if len(objs) > 0 { + return BucketNotEmpty + } + + s.mtx.Lock() + defer s.mtx.Unlock() + return os.RemoveAll(filepath.Join(s.rootDir, url.PathEscape(name))) +} + +// CreateObject stores an object as a regular file on disk. The backing content +// for the object may be in the same file that's being updated, so a temporary +// file is first created and then moved into place. This also makes it so any +// object content readers currently open continue reading from the original +// file instead of the newly created file. +// +// The crc32c checksum and md5 hash of the object content is calculated when +// reading the object content. Any checksum or hash in the passed-in object +// metadata is overwritten. +func (s *storageFS) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) { + if obj.Generation > 0 { + return StreamingObject{}, errors.New("not implemented: fs storage type does not support objects generation yet") + } + + // Note: this was a quick fix for issue #701. Now that we have a way to + // persist object attributes, we should implement versioning in the + // filesystem backend and handle generations outside of the backends. 
+ obj.Generation = time.Now().UnixNano() / 1000 + + s.mtx.Lock() + defer s.mtx.Unlock() + err := s.createBucket(obj.BucketName, BucketAttrs{VersioningEnabled: false}) + if err != nil { + return StreamingObject{}, err + } + + var activeGeneration int64 + existingObj, err := s.getObject(obj.BucketName, obj.Name) + if err != nil { + activeGeneration = 0 + } else { + activeGeneration = existingObj.Generation + } + + if !conditions.ConditionsMet(activeGeneration) { + return StreamingObject{}, PreConditionFailed + } + + path := filepath.Join(s.rootDir, url.PathEscape(obj.BucketName), obj.Name) + if err = os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return StreamingObject{}, err + } + + // Nothing to do if this operation only creates directories + if strings.HasSuffix(obj.Name, "/") { + // TODO: populate Crc32c, Md5Hash, and Etag + return StreamingObject{obj.ObjectAttrs, noopSeekCloser{bytes.NewReader([]byte{})}}, nil + } + + var buf bytes.Buffer + hasher := checksum.NewStreamingHasher() + objectContent := io.TeeReader(obj.Content, hasher) + + if _, err = io.Copy(&buf, objectContent); err != nil { + return StreamingObject{}, err + } + + if obj.Crc32c == "" { + obj.Crc32c = hasher.EncodedCrc32cChecksum() + } + if obj.Md5Hash == "" { + obj.Md5Hash = hasher.EncodedMd5Hash() + } + if obj.Etag == "" { + obj.Etag = obj.Md5Hash + } + + // TODO: Handle if metadata is not present more gracefully? + encoded, err := json.Marshal(obj.ObjectAttrs) + if err != nil { + return StreamingObject{}, err + } + + if err := writeFile(path, buf.Bytes(), 0o600); err != nil { + return StreamingObject{}, err + } + + if err = s.mh.write(path, encoded); err != nil { + return StreamingObject{}, err + } + + err = openObjectAndSetSize(&obj, path) + + return obj, err +} + +// ListObjects lists the objects in a given bucket with a given prefix and +// delimeter. +func (s *storageFS) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + objects := []ObjectAttrs{} + bucketPath := filepath.Join(s.rootDir, url.PathEscape(bucketName)) + if err := filepath.Walk(bucketPath, func(path string, info fs.FileInfo, err error) error { if err != nil { - return nil, fmt.Errorf("failed to unescape object name %s: %s", info.Name(), err) + return err + } + + objName, _ := filepath.Rel(bucketPath, path) + if s.mh.isSpecialFile(info.Name()) { + return nil + } + if info.IsDir() { + return nil } - object, err := s.GetObject(bucketName, unescaped) + if prefix != "" && !strings.HasPrefix(objName, prefix) { + return nil + } + objAttrs, err := s.getObjectAttrs(bucketName, objName) if err != nil { - return nil, err + return err } - objects = append(objects, object) + objects = append(objects, objAttrs) + return nil + }); err != nil { + return nil, err } return objects, nil } -// GetObject get an object by bucket and name -func (s *StorageFS) GetObject(bucketName, objectName string) (Object, error) { - encoded, err := ioutil.ReadFile(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName))) +// GetObject get an object by bucket and name. +func (s *storageFS) GetObject(bucketName, objectName string) (StreamingObject, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + return s.getObject(bucketName, objectName) +} + +// GetObjectWithGeneration retrieves an specific version of the object. Not +// implemented for this backend. 
+func (s *storageFS) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) { + obj, err := s.GetObject(bucketName, objectName) if err != nil { - return Object{}, err + return obj, err } - var obj Object - err = json.Unmarshal(encoded, &obj) - if err != nil { - return Object{}, err + if obj.Generation != generation { + return obj, fmt.Errorf("generation mismatch, object generation is %v, requested generation is %v (note: filesystem backend does not support versioning)", obj.Generation, generation) } - obj.Name = objectName - obj.BucketName = bucketName return obj, nil } -// DeleteObject deletes an object by bucket and name -func (s *StorageFS) DeleteObject(bucketName, objectName string) error { +func (s *storageFS) getObject(bucketName, objectName string) (StreamingObject, error) { + attrs, err := s.getObjectAttrs(bucketName, objectName) + if err != nil { + return StreamingObject{}, err + } + + obj := StreamingObject{ObjectAttrs: attrs} + path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName) + err = openObjectAndSetSize(&obj, path) + + return obj, err +} + +func openObjectAndSetSize(obj *StreamingObject, path string) error { + info, err := os.Stat(path) + if err != nil { + return err + } + + obj.Content = newLazyReader(path) + obj.Size = info.Size() + + return nil +} + +func (s *storageFS) getObjectAttrs(bucketName, objectName string) (ObjectAttrs, error) { + path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName) + encoded, err := s.mh.read(path) + if err != nil { + return ObjectAttrs{}, err + } + + var attrs ObjectAttrs + if err = json.Unmarshal(encoded, &attrs); err != nil { + return ObjectAttrs{}, err + } + + info, err := os.Stat(path) + if err != nil { + return ObjectAttrs{}, fmt.Errorf("failed to stat: %w", err) + } + + attrs.Name = filepath.ToSlash(objectName) + attrs.BucketName = bucketName + attrs.Size = info.Size() + return attrs, nil +} + +// DeleteObject deletes an object by bucket and name. 
+func (s *storageFS) DeleteObject(bucketName, objectName string) error { + s.mtx.Lock() + defer s.mtx.Unlock() if objectName == "" { - return fmt.Errorf("can't delete object with empty name") + return errors.New("can't delete object with empty name") } - return os.Remove(filepath.Join(s.rootDir, url.PathEscape(bucketName), url.PathEscape(objectName))) + path := filepath.Join(s.rootDir, url.PathEscape(bucketName), objectName) + if err := s.mh.remove(path); err != nil { + return err + } + return os.Remove(path) +} + +func (s *storageFS) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { + obj, err := s.GetObject(bucketName, objectName) + if err != nil { + return StreamingObject{}, err + } + defer obj.Close() + + obj.patch(attrsToUpdate) + obj.Generation = 0 // reset generation id + return s.CreateObject(obj, NoConditions{}) +} + +func (s *storageFS) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { + obj, err := s.GetObject(bucketName, objectName) + if err != nil { + return StreamingObject{}, err + } + defer obj.Close() + + if attrsToUpdate.Metadata != nil { + obj.Metadata = map[string]string{} + } + obj.patch(attrsToUpdate) + obj.Generation = 0 // reset generation id + return s.CreateObject(obj, NoConditions{}) +} + +type concatenatedContent struct { + io.Reader +} + +func (c concatenatedContent) Close() error { + return errors.New("not implemented") +} + +func (c concatenatedContent) Seek(offset int64, whence int) (int64, error) { + return 0, errors.New("not implemented") +} + +func concatObjectReaders(objects []StreamingObject) io.ReadSeekCloser { + readers := make([]io.Reader, len(objects)) + for i := range objects { + readers[i] = objects[i].Content + } + return concatenatedContent{io.MultiReader(readers...)} +} + +func (s *storageFS) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) { + var sourceObjects []StreamingObject + for _, n := range objectNames { + obj, err := s.GetObject(bucketName, n) + if err != nil { + return StreamingObject{}, err + } + defer obj.Close() + sourceObjects = append(sourceObjects, obj) + } + + dest := StreamingObject{ + ObjectAttrs: ObjectAttrs{ + BucketName: bucketName, + Name: destinationName, + ContentType: contentType, + Created: time.Now().String(), + }, + } + + dest.Content = concatObjectReaders(sourceObjects) + dest.Metadata = metadata + + result, err := s.CreateObject(dest, NoConditions{}) + if err != nil { + return result, err + } + + return result, nil } diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go new file mode 100644 index 0000000000000..8c30a3149213c --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/lazy_file.go @@ -0,0 +1,53 @@ +// Copyright 2022 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package backend + +import ( + "io" + "os" + "sync" +) + +type lazyReader struct { + filename string + once *sync.Once + f *os.File + err error +} + +func newLazyReader(filename string) io.ReadSeekCloser { + return &lazyReader{ + filename: filename, + once: &sync.Once{}, + } +} + +func (r *lazyReader) open() { + r.f, r.err = os.Open(r.filename) +} + +func (r *lazyReader) Read(p []byte) (int, error) { + r.once.Do(r.open) + if r.err != nil { + return 0, r.err + } + return r.f.Read(p) +} + +func (r *lazyReader) Seek(offset int64, whence int) (int64, error) { + r.once.Do(r.open) + if r.err != nil { + return 0, r.err + } + return r.f.Seek(offset, whence) +} + +func (r *lazyReader) Close() error { + r.once.Do(r.open) + if r.err != nil { + return r.err + } + return r.f.Close() +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go index 257843ad36308..c32f06abf2cd4 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/memory.go @@ -7,118 +7,386 @@ package backend import ( "errors" "fmt" + "io" + "strings" "sync" + "time" + + "github.com/fsouza/fake-gcs-server/internal/checksum" ) -// StorageMemory is an implementation of the backend storage that stores data in memory -type StorageMemory struct { - buckets map[string][]Object +const timestampFormat = "2006-01-02T15:04:05.999999Z07:00" + +// storageMemory is an implementation of the backend storage that stores data +// in memory. +type storageMemory struct { + buckets map[string]bucketInMemory mtx sync.RWMutex } -// NewStorageMemory creates an instance of StorageMemory -func NewStorageMemory(objects []Object) Storage { - s := &StorageMemory{ - buckets: make(map[string][]Object), +type bucketInMemory struct { + Bucket + // maybe we can refactor how the memory backend works? no need to store + // Object instances. 
+ activeObjects []Object + archivedObjects []Object +} + +func newBucketInMemory(name string, versioningEnabled bool, bucketAttrs BucketAttrs) bucketInMemory { + return bucketInMemory{Bucket{name, versioningEnabled, time.Now(), bucketAttrs.DefaultEventBasedHold}, []Object{}, []Object{}} +} + +func (bm *bucketInMemory) addObject(obj Object) Object { + if obj.Crc32c == "" { + obj.Crc32c = checksum.EncodedCrc32cChecksum(obj.Content) + } + if obj.Md5Hash == "" { + obj.Md5Hash = checksum.EncodedMd5Hash(obj.Content) + } + if obj.Etag == "" { + obj.Etag = obj.Md5Hash + } + if obj.Size == 0 { + obj.Size = int64(len(obj.Content)) + } + obj.Generation = getNewGenerationIfZero(obj.Generation) + index := findObject(obj, bm.activeObjects, false) + if index >= 0 { + if bm.VersioningEnabled { + bm.activeObjects[index].Deleted = time.Now().Format(timestampFormat) + bm.cpToArchive(bm.activeObjects[index]) + } + bm.activeObjects[index] = obj + } else { + bm.activeObjects = append(bm.activeObjects, obj) + } + + return obj +} + +func getNewGenerationIfZero(generation int64) int64 { + if generation == 0 { + return time.Now().UnixNano() / 1000 + } + return generation +} + +func (bm *bucketInMemory) deleteObject(obj Object, matchGeneration bool) { + index := findObject(obj, bm.activeObjects, matchGeneration) + if index < 0 { + return + } + if bm.VersioningEnabled { + obj.Deleted = time.Now().Format(timestampFormat) + bm.mvToArchive(obj) + } else { + bm.deleteFromObjectList(obj, true) + } +} + +func (bm *bucketInMemory) cpToArchive(obj Object) { + bm.archivedObjects = append(bm.archivedObjects, obj) +} + +func (bm *bucketInMemory) mvToArchive(obj Object) { + bm.cpToArchive(obj) + bm.deleteFromObjectList(obj, true) +} + +func (bm *bucketInMemory) deleteFromObjectList(obj Object, active bool) { + objects := bm.activeObjects + if !active { + objects = bm.archivedObjects + } + index := findObject(obj, objects, !active) + objects[index] = objects[len(objects)-1] + if active { + bm.activeObjects = objects[:len(objects)-1] + } else { + bm.archivedObjects = objects[:len(objects)-1] + } +} + +// findObject looks for an object in the given list and return the index where it +// was found, or -1 if the object doesn't exist. +func findObject(obj Object, objectList []Object, matchGeneration bool) int { + for i, o := range objectList { + if matchGeneration && obj.ID() == o.ID() { + return i + } + if !matchGeneration && obj.IDNoGen() == o.IDNoGen() { + return i + } + } + return -1 +} + +// findLastObjectGeneration looks for an object in the given list and return the index where it +// was found, or -1 if the object doesn't exist. +func findLastObjectGeneration(obj Object, objectList []Object) int64 { + highScore := int64(0) + for _, o := range objectList { + if obj.IDNoGen() == o.IDNoGen() && o.Generation > highScore { + highScore = o.Generation + } + } + return highScore +} + +// NewStorageMemory creates an instance of StorageMemory. 
+func NewStorageMemory(objects []StreamingObject) (Storage, error) { + s := &storageMemory{ + buckets: make(map[string]bucketInMemory), } for _, o := range objects { - s.buckets[o.BucketName] = append(s.buckets[o.BucketName], o) + bufferedObject, err := o.BufferedObject() + if err != nil { + return nil, err + } + s.CreateBucket(o.BucketName, BucketAttrs{false, false}) + bucket := s.buckets[o.BucketName] + bucket.addObject(bufferedObject) + s.buckets[o.BucketName] = bucket } - return s + return s, nil } -// CreateBucket creates a bucket -func (s *StorageMemory) CreateBucket(name string) error { - s.mtx.Lock() - defer s.mtx.Unlock() - if _, ok := s.buckets[name]; !ok { - s.buckets[name] = nil +func (s *storageMemory) UpdateBucket(bucketName string, attrsToUpdate BucketAttrs) error { + bucketInMemory, err := s.getBucketInMemory(bucketName) + if err != nil { + return err } + bucketInMemory.DefaultEventBasedHold = attrsToUpdate.DefaultEventBasedHold + bucketInMemory.VersioningEnabled = attrsToUpdate.VersioningEnabled + s.buckets[bucketName] = bucketInMemory return nil } -// ListBuckets lists buckets -func (s *StorageMemory) ListBuckets() ([]string, error) { +// CreateBucket creates a bucket. +func (s *storageMemory) CreateBucket(name string, bucketAttrs BucketAttrs) error { s.mtx.Lock() defer s.mtx.Unlock() - buckets := []string{} - for bucket := range s.buckets { - buckets = append(buckets, bucket) + bucket, err := s.getBucketInMemory(name) + if err == nil { + if bucket.VersioningEnabled != bucketAttrs.VersioningEnabled { + return fmt.Errorf("a bucket named %s already exists, but with different properties", name) + } + return nil + } + s.buckets[name] = newBucketInMemory(name, bucketAttrs.VersioningEnabled, bucketAttrs) + return nil +} + +// ListBuckets lists buckets currently registered in the backend. +func (s *storageMemory) ListBuckets() ([]Bucket, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + buckets := []Bucket{} + for _, bucketInMemory := range s.buckets { + buckets = append(buckets, Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, false}) } return buckets, nil } -// GetBucket checks if a bucket exists -func (s *StorageMemory) GetBucket(name string) error { - s.mtx.Lock() - defer s.mtx.Unlock() +// GetBucket retrieves the bucket information from the backend. +func (s *storageMemory) GetBucket(name string) (Bucket, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + bucketInMemory, err := s.getBucketInMemory(name) + return Bucket{bucketInMemory.Name, bucketInMemory.VersioningEnabled, bucketInMemory.TimeCreated, bucketInMemory.DefaultEventBasedHold}, err +} - if _, ok := s.buckets[name]; !ok { - return fmt.Errorf("no bucket named %s", name) +func (s *storageMemory) getBucketInMemory(name string) (bucketInMemory, error) { + if bucketInMemory, found := s.buckets[name]; found { + return bucketInMemory, nil } - return nil + return bucketInMemory{}, fmt.Errorf("no bucket named %s", name) } -// CreateObject stores an object -func (s *StorageMemory) CreateObject(obj Object) error { +// DeleteBucket removes the bucket from the backend. 
+func (s *storageMemory) DeleteBucket(name string) error { + objs, err := s.ListObjects(name, "", false) + if err != nil { + return BucketNotFound + } + if len(objs) > 0 { + return BucketNotEmpty + } + s.mtx.Lock() defer s.mtx.Unlock() - - index := s.findObject(obj) - if index < 0 { - s.buckets[obj.BucketName] = append(s.buckets[obj.BucketName], obj) - } else { - s.buckets[obj.BucketName][index] = obj - } + delete(s.buckets, name) return nil } -// findObject looks for an object in its bucket and return the index where it -// was found, or -1 if the object doesn't exist. -// -// It doesn't lock the mutex, callers must lock the mutex before calling this -// method. -func (s *StorageMemory) findObject(obj Object) int { - for i, o := range s.buckets[obj.BucketName] { - if obj.ID() == o.ID() { - return i - } +// CreateObject stores an object in the backend. +func (s *storageMemory) CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) { + s.mtx.Lock() + defer s.mtx.Unlock() + bucketInMemory, err := s.getBucketInMemory(obj.BucketName) + if err != nil { + bucketInMemory = newBucketInMemory(obj.BucketName, false, BucketAttrs{}) } - return -1 + bufferedObj, err := obj.BufferedObject() + currentGeneration := findLastObjectGeneration(bufferedObj, bucketInMemory.activeObjects) + if !conditions.ConditionsMet(currentGeneration) { + return StreamingObject{}, PreConditionFailed + } + if err != nil { + return StreamingObject{}, err + } + newObj := bucketInMemory.addObject(bufferedObj) + s.buckets[obj.BucketName] = bucketInMemory + return newObj.StreamingObject(), nil } -// ListObjects lists the objects in a given bucket with a given prefix and delimeter -func (s *StorageMemory) ListObjects(bucketName string) ([]Object, error) { +// ListObjects lists the objects in a given bucket with a given prefix and +// delimiter. +func (s *storageMemory) ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) { s.mtx.RLock() defer s.mtx.RUnlock() - objects, ok := s.buckets[bucketName] - if !ok { - return nil, errors.New("bucket not found") + bucketInMemory, err := s.getBucketInMemory(bucketName) + if err != nil { + return []ObjectAttrs{}, err + } + objAttrs := make([]ObjectAttrs, 0, len(bucketInMemory.activeObjects)) + for _, obj := range bucketInMemory.activeObjects { + if prefix != "" && !strings.HasPrefix(obj.Name, prefix) { + continue + } + objAttrs = append(objAttrs, obj.ObjectAttrs) + } + if !versions { + return objAttrs, nil } - return objects, nil + + archvObjs := make([]ObjectAttrs, 0, len(bucketInMemory.archivedObjects)) + for _, obj := range bucketInMemory.archivedObjects { + if prefix != "" && !strings.HasPrefix(obj.Name, prefix) { + continue + } + archvObjs = append(archvObjs, obj.ObjectAttrs) + } + return append(objAttrs, archvObjs...), nil } -// GetObject get an object by bucket and name -func (s *StorageMemory) GetObject(bucketName, objectName string) (Object, error) { - obj := Object{BucketName: bucketName, Name: objectName} +func (s *storageMemory) GetObject(bucketName, objectName string) (StreamingObject, error) { + return s.GetObjectWithGeneration(bucketName, objectName, 0) +} + +// GetObjectWithGeneration retrieves a specific version of the object. 
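Taken together, addObject, ListObjects and GetObjectWithGeneration in this hunk give the in-memory backend simple generation-based versioning: overwriting an object archives the previous generation when the bucket has versioning enabled. Below is a test-style sketch of that behaviour, written as if it lived in package backend (an internal package, so it cannot be imported from outside the module); the bucket and object names are illustrative only.

package backend

import "testing"

func TestMemoryBackendVersioningSketch(t *testing.T) {
	st, err := NewStorageMemory(nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := st.CreateBucket("bkt", BucketAttrs{VersioningEnabled: true}); err != nil {
		t.Fatal(err)
	}

	put := func(content string) StreamingObject {
		obj := Object{
			ObjectAttrs: ObjectAttrs{BucketName: "bkt", Name: "obj.txt", ContentType: "text/plain"},
			Content:     []byte(content),
		}
		created, err := st.CreateObject(obj.StreamingObject(), NoConditions{})
		if err != nil {
			t.Fatal(err)
		}
		return created
	}

	first := put("v1")
	put("v2") // archives the "v1" generation because versioning is enabled

	live, _ := st.ListObjects("bkt", "", false)
	all, _ := st.ListObjects("bkt", "", true)
	if len(live) != 1 || len(all) != 2 {
		t.Fatalf("want 1 live object and 2 versions, got %d and %d", len(live), len(all))
	}

	// The archived generation stays addressable by its generation number.
	if _, err := st.GetObjectWithGeneration("bkt", "obj.txt", first.Generation); err != nil {
		t.Fatal(err)
	}
}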
+func (s *storageMemory) GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) { s.mtx.RLock() defer s.mtx.RUnlock() - index := s.findObject(obj) + bucketInMemory, err := s.getBucketInMemory(bucketName) + if err != nil { + return StreamingObject{}, err + } + matchGeneration := false + obj := Object{ObjectAttrs: ObjectAttrs{BucketName: bucketName, Name: objectName}} + listToConsider := bucketInMemory.activeObjects + if generation != 0 { + matchGeneration = true + obj.Generation = generation + listToConsider = append(listToConsider, bucketInMemory.archivedObjects...) + } + index := findObject(obj, listToConsider, matchGeneration) if index < 0 { - return obj, errors.New("object not found") + return obj.StreamingObject(), errors.New("object not found") } - return s.buckets[bucketName][index], nil + + return listToConsider[index].StreamingObject(), nil } -// DeleteObject deletes an object by bucket and name -func (s *StorageMemory) DeleteObject(bucketName, objectName string) error { - obj := Object{BucketName: bucketName, Name: objectName} - index := s.findObject(obj) - if index < 0 { - return fmt.Errorf("no such object in bucket %s: %s", bucketName, objectName) +func (s *storageMemory) DeleteObject(bucketName, objectName string) error { + obj, err := s.GetObject(bucketName, objectName) + if err != nil { + return err + } + s.mtx.Lock() + defer s.mtx.Unlock() + bucketInMemory, err := s.getBucketInMemory(bucketName) + if err != nil { + return err + } + bufferedObject, err := obj.BufferedObject() + if err != nil { + return err } - bucket := s.buckets[obj.BucketName] - bucket[index] = bucket[len(bucket)-1] - s.buckets[obj.BucketName] = bucket[:len(bucket)-1] + bucketInMemory.deleteObject(bufferedObject, true) + s.buckets[bucketName] = bucketInMemory return nil } + +func (s *storageMemory) PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { + obj, err := s.GetObject(bucketName, objectName) + if err != nil { + return StreamingObject{}, err + } + + obj.patch(attrsToUpdate) + s.CreateObject(obj, NoConditions{}) + return obj, nil +} + +// UpdateObject replaces an object metadata, custom time, and acl. +func (s *storageMemory) UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) { + obj, err := s.GetObject(bucketName, objectName) + if err != nil { + return StreamingObject{}, err + } + + if attrsToUpdate.Metadata != nil { + obj.Metadata = map[string]string{} + } + obj.patch(attrsToUpdate) + s.CreateObject(obj, NoConditions{}) + return obj, nil +} + +func (s *storageMemory) ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) { + var data []byte + for _, n := range objectNames { + obj, err := s.GetObject(bucketName, n) + if err != nil { + return StreamingObject{}, err + } + objectContent, err := io.ReadAll(obj.Content) + if err != nil { + return StreamingObject{}, err + } + data = append(data, objectContent...) 
+ } + + var dest Object + streamingDest, err := s.GetObject(bucketName, destinationName) + if err != nil { + dest = Object{ + ObjectAttrs: ObjectAttrs{ + BucketName: bucketName, + Name: destinationName, + ContentType: contentType, + Created: time.Now().String(), + }, + } + } else { + dest, err = streamingDest.BufferedObject() + if err != nil { + return StreamingObject{}, err + } + } + + dest.Content = data + dest.Crc32c = "" + dest.Md5Hash = "" + dest.Etag = "" + dest.Size = 0 + dest.Metadata = metadata + + result, err := s.CreateObject(dest.StreamingObject(), NoConditions{}) + if err != nil { + return result, err + } + + return result, nil +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go new file mode 100644 index 0000000000000..6d9d2313d27dc --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata.go @@ -0,0 +1,13 @@ +// Copyright 2022 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package backend + +type metadataHandler interface { + write(path string, encoded []byte) error + read(path string) ([]byte, error) + remove(path string) error + isSpecialFile(path string) bool + rename(pathSrc, pathDst string) error +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go new file mode 100644 index 0000000000000..94cce654a8c69 --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_file.go @@ -0,0 +1,34 @@ +// Copyright 2022 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package backend + +import ( + "os" + "strings" +) + +const metadataSuffix = ".metadata" + +type metadataFile struct{} + +func (m metadataFile) write(path string, encoded []byte) error { + return writeFile(path+metadataSuffix, encoded, 0o600) +} + +func (m metadataFile) read(path string) ([]byte, error) { + return os.ReadFile(path + metadataSuffix) +} + +func (m metadataFile) isSpecialFile(path string) bool { + return strings.HasSuffix(path, metadataSuffix) +} + +func (m metadataFile) remove(path string) error { + return os.Remove(path + metadataSuffix) +} + +func (m metadataFile) rename(pathSrc, pathDst string) error { + return os.Rename(pathSrc+metadataSuffix, pathDst+metadataSuffix) +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go new file mode 100644 index 0000000000000..9d40580120be6 --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/metadata_xattr.go @@ -0,0 +1,33 @@ +// Copyright 2022 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
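When extended attributes are unavailable, the metadataFile handler above stores each object's attributes in a "<object path>.metadata" sidecar, next to the "<bucket>.bucketMetadata" file written by the filesystem backend. The in-package (backend) sketch below shows the resulting on-disk layout; the bucket and object names are made up, noopSeekCloser is the package's own helper (defined in object.go further down in this patch), and on filesystems with working xattr support the sidecar file is replaced by an extended attribute, so the listing would differ.

package backend

import (
	"bytes"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// fsLayoutSketch creates one object through the filesystem backend and prints
// every regular file under the root directory.
func fsLayoutSketch() error {
	root, err := os.MkdirTemp("", "fake-gcs-fs")
	if err != nil {
		return err
	}
	defer os.RemoveAll(root)

	st, err := NewStorageFS(nil, root)
	if err != nil {
		return err
	}
	obj := StreamingObject{
		ObjectAttrs: ObjectAttrs{BucketName: "bkt", Name: "dir/obj.txt", ContentType: "text/plain"},
		Content:     noopSeekCloser{bytes.NewReader([]byte("hello"))},
	}
	created, err := st.CreateObject(obj, NoConditions{})
	if err != nil {
		return err
	}
	created.Close()

	// With the metadataFile handler the listing contains (in some order):
	// bkt/dir/obj.txt, bkt/dir/obj.txt.metadata and bkt.bucketMetadata.
	return filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if !d.IsDir() {
			rel, _ := filepath.Rel(root, path)
			fmt.Println(filepath.ToSlash(rel))
		}
		return nil
	})
}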
+ +package backend + +import ( + "github.com/pkg/xattr" +) + +const xattrKey = "user.metadata" + +type metadataXattr struct{} + +func (m metadataXattr) write(path string, encoded []byte) error { + return xattr.Set(path, xattrKey, encoded) +} + +func (m metadataXattr) read(path string) ([]byte, error) { + return xattr.Get(path, xattrKey) +} + +func (m metadataXattr) isSpecialFile(path string) bool { + return false +} + +func (m metadataXattr) remove(path string) error { + return nil +} + +func (m metadataXattr) rename(pathSrc, pathDst string) error { + return nil +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go index e0ca2b12ec571..63bf8d6d147c3 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/object.go @@ -4,16 +4,104 @@ package backend +import ( + "bytes" + "fmt" + "io" + "reflect" + + "cloud.google.com/go/storage" +) + +// ObjectAttrs represents the meta-data without its contents. +type ObjectAttrs struct { + BucketName string `json:"-"` + Name string `json:"-"` + Size int64 `json:"-"` + ContentType string + ContentEncoding string + ContentDisposition string + CacheControl string + Crc32c string + Md5Hash string + Etag string + ACL []storage.ACLRule + Metadata map[string]string + Created string + Deleted string + Updated string + CustomTime string + Generation int64 +} + +// ID is used for comparing objects. +func (o *ObjectAttrs) ID() string { + return fmt.Sprintf("%s#%d", o.IDNoGen(), o.Generation) +} + +// IDNoGen does not consider the generation field. +func (o *ObjectAttrs) IDNoGen() string { + return fmt.Sprintf("%s/%s", o.BucketName, o.Name) +} + // Object represents the object that is stored within the fake server. type Object struct { - BucketName string `json:"-"` - Name string `json:"-"` - Content []byte - Crc32c string - Md5Hash string + ObjectAttrs + Content []byte +} + +type noopSeekCloser struct { + io.ReadSeeker +} + +func (n noopSeekCloser) Close() error { + return nil +} + +func (o Object) StreamingObject() StreamingObject { + return StreamingObject{ + ObjectAttrs: o.ObjectAttrs, + Content: noopSeekCloser{bytes.NewReader(o.Content)}, + } +} + +type StreamingObject struct { + ObjectAttrs + Content io.ReadSeekCloser +} + +func (o *StreamingObject) Close() error { + if o != nil && o.Content != nil { + return o.Content.Close() + } + return nil +} + +// Convert this StreamingObject to a (buffered) Object. 
+func (o *StreamingObject) BufferedObject() (Object, error) { + data, err := io.ReadAll(o.Content) + return Object{ + ObjectAttrs: o.ObjectAttrs, + Content: data, + }, err } -// ID is useful for comparing objects -func (o *Object) ID() string { - return o.BucketName + "/" + o.Name +func (o *StreamingObject) patch(attrsToUpdate ObjectAttrs) { + currObjValues := reflect.ValueOf(&(o.ObjectAttrs)).Elem() + currObjType := currObjValues.Type() + newObjValues := reflect.ValueOf(attrsToUpdate) + for i := 0; i < newObjValues.NumField(); i++ { + if reflect.Value.IsZero(newObjValues.Field(i)) { + continue + } else if currObjType.Field(i).Name == "Metadata" { + if o.Metadata == nil { + o.Metadata = map[string]string{} + } + for k, v := range attrsToUpdate.Metadata { + o.Metadata[k] = v + } + } else { + currObjValues.Field(i).Set(newObjValues.Field(i)) + } + } } diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go index c77583462fdb5..da8e8e51e2128 100644 --- a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/storage.go @@ -2,15 +2,43 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Package backend proides the backends used by fake-gcs-server. package backend -// Storage is the generic interface for implementing the backend storage of the server +type Conditions interface { + ConditionsMet(activeGeneration int64) bool +} + +type NoConditions struct{} + +func (NoConditions) ConditionsMet(int64) bool { + return true +} + +// Storage is the generic interface for implementing the backend storage of the +// server. type Storage interface { - CreateBucket(name string) error - ListBuckets() ([]string, error) - GetBucket(name string) error - CreateObject(obj Object) error - ListObjects(bucketName string) ([]Object, error) - GetObject(bucketName, objectName string) (Object, error) + CreateBucket(name string, bucketAttrs BucketAttrs) error + ListBuckets() ([]Bucket, error) + GetBucket(name string) (Bucket, error) + UpdateBucket(name string, attrsToUpdate BucketAttrs) error + DeleteBucket(name string) error + CreateObject(obj StreamingObject, conditions Conditions) (StreamingObject, error) + ListObjects(bucketName string, prefix string, versions bool) ([]ObjectAttrs, error) + GetObject(bucketName, objectName string) (StreamingObject, error) + GetObjectWithGeneration(bucketName, objectName string, generation int64) (StreamingObject, error) DeleteObject(bucketName, objectName string) error + PatchObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) + UpdateObject(bucketName, objectName string, attrsToUpdate ObjectAttrs) (StreamingObject, error) + ComposeObject(bucketName string, objectNames []string, destinationName string, metadata map[string]string, contentType string) (StreamingObject, error) } + +type Error string + +func (e Error) Error() string { return string(e) } + +const ( + BucketNotFound = Error("bucket not found") + BucketNotEmpty = Error("bucket must be empty prior to deletion") + PreConditionFailed = Error("Precondition failed") +) diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go new file mode 100644 index 0000000000000..a01893f6be036 --- /dev/null +++ 
b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_bsd.go @@ -0,0 +1,19 @@ +// Copyright 2019 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin || freebsd + +package backend + +import ( + "os" + "syscall" +) + +func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec { + if statT, ok := input.Sys().(*syscall.Stat_t); ok { + return statT.Ctimespec + } + return syscall.Timespec{} +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go new file mode 100644 index 0000000000000..0f959e9b74c6c --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_linux.go @@ -0,0 +1,18 @@ +// Copyright 2019 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package backend + +import ( + "os" + "syscall" +) + +func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec { + if statT, ok := input.Sys().(*syscall.Stat_t); ok { + // not true: Ctime is not created time, but not creating a file to persist this metadata, yet... + return statT.Ctim + } + return syscall.Timespec{} +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go new file mode 100644 index 0000000000000..54c7bc9b0badd --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/time_windows.go @@ -0,0 +1,18 @@ +// Copyright 2019 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package backend + +import ( + "os" + "syscall" +) + +func createTimeFromFileInfo(input os.FileInfo) syscall.Timespec { + if statT, ok := input.Sys().(*syscall.Win32FileAttributeData); ok { + nsec := statT.CreationTime.Nanoseconds() + return syscall.NsecToTimespec(nsec) + } + return syscall.Timespec{} +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go new file mode 100644 index 0000000000000..2e5e510fbc3d4 --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_unix.go @@ -0,0 +1,17 @@ +// Copyright 2022 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package backend + +import ( + "os" + + "github.com/google/renameio/v2" +) + +func writeFile(filename string, data []byte, perm os.FileMode) error { + return renameio.WriteFile(filename, data, perm) +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go new file mode 100644 index 0000000000000..2d6600c803024 --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/backend/writefile_windows.go @@ -0,0 +1,13 @@ +// Copyright 2022 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package backend + +import ( + "os" +) + +func writeFile(filename string, data []byte, perm os.FileMode) error { + return os.WriteFile(filename, data, perm) +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go b/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go new file mode 100644 index 0000000000000..c247336d8e65e --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/checksum/checksum.go @@ -0,0 +1,70 @@ +// Copyright 2021 Francisco Souza. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "hash" + "hash/crc32" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +func crc32cChecksum(content []byte) []byte { + checksummer := crc32.New(crc32cTable) + checksummer.Write(content) + return checksummer.Sum(make([]byte, 0, 4)) +} + +func EncodedChecksum(checksum []byte) string { + return base64.StdEncoding.EncodeToString(checksum) +} + +func EncodedCrc32cChecksum(content []byte) string { + return EncodedChecksum(crc32cChecksum(content)) +} + +func MD5Hash(b []byte) []byte { + h := md5.New() + h.Write(b) + return h.Sum(nil) +} + +func EncodedHash(hash []byte) string { + return base64.StdEncoding.EncodeToString(hash) +} + +func EncodedMd5Hash(content []byte) string { + return EncodedHash(MD5Hash(content)) +} + +type StreamingHasher struct { + crc32 hash.Hash32 + md5 hash.Hash +} + +func NewStreamingHasher() *StreamingHasher { + return &StreamingHasher{ + crc32: crc32.New(crc32cTable), + md5: md5.New(), + } +} + +func (s *StreamingHasher) Write(p []byte) (n int, err error) { + n, err = s.crc32.Write(p) + if err != nil { + return n, err + } + return s.md5.Write(p) +} + +func (s *StreamingHasher) EncodedCrc32cChecksum() string { + return EncodedChecksum(s.crc32.Sum(nil)) +} + +func (s *StreamingHasher) EncodedMd5Hash() string { + return EncodedHash(s.md5.Sum(nil)) +} diff --git a/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go b/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go new file mode 100644 index 0000000000000..f20ac8c87a40a --- /dev/null +++ b/vendor/github.com/fsouza/fake-gcs-server/internal/notification/event.go @@ -0,0 +1,222 @@ +package notification + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + "time" + + "cloud.google.com/go/pubsub" + "github.com/fsouza/fake-gcs-server/internal/backend" +) + +// EventType is the type of event to trigger. The descriptions of the events +// can be found here: +// https://cloud.google.com/storage/docs/pubsub-notifications#events. +type EventType string + +const ( + // EventFinalize is triggered when an object is added. + EventFinalize EventType = "OBJECT_FINALIZE" + // EventDelete is triggered when an object is deleted. + EventDelete = "OBJECT_DELETE" + // EventMetadata is triggered when an object's metadata is changed. + EventMetadata = "OBJECT_METADATA_UPDATE" + // EventArchive bucket versioning must be enabled. is triggered when an object becomes the non current version + EventArchive = "OBJECT_ARCHIVE" +) + +// EventNotificationOptions contains flags for events, that if true, will create +// trigger notifications when they occur. +type EventNotificationOptions struct { + Finalize bool + Delete bool + MetadataUpdate bool + Archive bool +} + +// EventManagerOptions determines what events are triggered and where. 
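The StreamingHasher added above is what lets the filesystem backend compute the CRC32C and MD5 of an object while copying its body (via io.TeeReader) rather than buffering it a second time. A small in-package (checksum) example of that pattern follows; the input string is arbitrary.

package checksum

import (
	"bytes"
	"fmt"
	"io"
)

func ExampleStreamingHasher() {
	src := bytes.NewReader([]byte("hello, fake-gcs-server"))
	hasher := NewStreamingHasher()

	// Everything read from tee is also written into the hasher.
	tee := io.TeeReader(src, hasher)
	var dst bytes.Buffer
	if _, err := io.Copy(&dst, tee); err != nil {
		fmt.Println("copy failed:", err)
		return
	}

	// The streaming results match the one-shot helpers defined above.
	fmt.Println(hasher.EncodedCrc32cChecksum() == EncodedCrc32cChecksum(dst.Bytes()))
	fmt.Println(hasher.EncodedMd5Hash() == EncodedMd5Hash(dst.Bytes()))
	// Output:
	// true
	// true
}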
+type EventManagerOptions struct {
+	// ProjectID is the project ID containing the pubsub topic.
+	ProjectID string
+	// TopicName is the pubsub topic name to publish events on.
+	TopicName string
+	// Bucket is the name of the bucket to publish events from.
+	Bucket string
+	// ObjectPrefix, if not empty, only objects having this prefix will generate
+	// trigger events.
+	ObjectPrefix string
+	// NotifyOn determines what events to trigger.
+	NotifyOn EventNotificationOptions
+}
+
+type EventManager interface {
+	Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string)
+}
+
+// PubsubEventManager checks if an event should be published.
+type PubsubEventManager struct {
+	// publishSynchronously is a flag that if true, events will be published
+	// synchronously and not in a goroutine. It is used during tests to prevent
+	// race conditions.
+	publishSynchronously bool
+	// notifyOn determines what events are triggered.
+	notifyOn EventNotificationOptions
+	// writer is where logs are written to.
+	writer io.Writer
+	// bucket, if not empty, only objects from this bucket will generate trigger events.
+	bucket string
+	// objectPrefix, if not empty, only objects having this prefix will generate
+	// trigger events.
+	objectPrefix string
+	// publisher is used to publish events on.
+	publisher eventPublisher
+}
+
+func NewPubsubEventManager(options EventManagerOptions, w io.Writer) (*PubsubEventManager, error) {
+	manager := &PubsubEventManager{
+		writer:       w,
+		notifyOn:     options.NotifyOn,
+		bucket:       options.Bucket,
+		objectPrefix: options.ObjectPrefix,
+	}
+	if options.ProjectID != "" && options.TopicName != "" {
+		ctx := context.Background()
+		client, err := pubsub.NewClient(ctx, options.ProjectID)
+		if err != nil {
+			return nil, fmt.Errorf("error creating pubsub client: %v", err)
+		}
+		manager.publisher = client.Topic(options.TopicName)
+	}
+	return manager, nil
+}
+
+// eventPublisher is the interface to publish triggered events.
+type eventPublisher interface {
+	Publish(ctx context.Context, msg *pubsub.Message) *pubsub.PublishResult
+}
+
+// Trigger checks if an event should be triggered. If so, it publishes the
+// event to a pubsub queue.
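+//
+// A hypothetical call site might look like the following, where obj is a
+// *backend.StreamingObject supplied by the caller and no extra attributes
+// are attached:
+//
+//	manager.Trigger(obj, EventFinalize, nil)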
+func (m *PubsubEventManager) Trigger(o *backend.StreamingObject, eventType EventType, extraEventAttr map[string]string) { + if m.publisher == nil { + return + } + if m.bucket != "" && o.BucketName != m.bucket { + return + } + if m.objectPrefix != "" && !strings.HasPrefix(o.Name, m.objectPrefix) { + return + } + switch eventType { + case EventFinalize: + if !m.notifyOn.Finalize { + return + } + case EventDelete: + if !m.notifyOn.Delete { + return + } + case EventMetadata: + if !m.notifyOn.MetadataUpdate { + return + } + case EventArchive: + if !m.notifyOn.Archive { + return + } + } + eventTime := time.Now().Format(time.RFC3339) + publishFunc := func() { + err := m.publish(o, eventType, eventTime, extraEventAttr) + if m.writer != nil { + if err != nil { + fmt.Fprintf(m.writer, "error publishing event: %v", err) + } else { + fmt.Fprintf(m.writer, "sent event %s for object %s\n", string(eventType), o.ID()) + } + } + } + if m.publishSynchronously { + publishFunc() + } else { + go publishFunc() + } +} + +func (m *PubsubEventManager) publish(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) error { + ctx := context.Background() + data, attributes, err := generateEvent(o, eventType, eventTime, extraEventAttr) + if err != nil { + return err + } + if r := m.publisher.Publish(ctx, &pubsub.Message{ + Data: data, + Attributes: attributes, + }); r != nil { + _, err = r.Get(ctx) + return err + } + return nil +} + +// gcsEvent is the payload of a GCS event. Note that all properties are string-quoted. +// The description of the full object can be found here: +// https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations. +type gcsEvent struct { + Kind string `json:"kind"` + ID string `json:"id"` + Name string `json:"name"` + Bucket string `json:"bucket"` + Generation int64 `json:"generation,string,omitempty"` + ContentType string `json:"contentType"` + ContentEncoding string `json:"contentEncoding,omitempty"` + Created string `json:"timeCreated,omitempty"` + Updated string `json:"updated,omitempty"` + StorageClass string `json:"storageClass"` + Size int64 `json:"size,string"` + MD5Hash string `json:"md5Hash,omitempty"` + CRC32c string `json:"crc32c,omitempty"` + MetaData map[string]string `json:"metadata,omitempty"` +} + +func generateEvent(o *backend.StreamingObject, eventType EventType, eventTime string, extraEventAttr map[string]string) ([]byte, map[string]string, error) { + payload := gcsEvent{ + Kind: "storage#object", + ID: o.ID(), + Name: o.Name, + Bucket: o.BucketName, + Generation: o.Generation, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + Created: o.Created, + Updated: o.Updated, + StorageClass: "STANDARD", + Size: o.Size, + MD5Hash: o.Md5Hash, + CRC32c: o.Crc32c, + MetaData: o.Metadata, + } + attributes := map[string]string{ + "bucketId": o.BucketName, + "eventTime": eventTime, + "eventType": string(eventType), + "objectGeneration": strconv.FormatInt(o.Generation, 10), + "objectId": o.Name, + "payloadFormat": "JSON_API_V1", + } + for k, v := range extraEventAttr { + if _, exists := attributes[k]; exists { + return nil, nil, fmt.Errorf("cannot overwrite duplicate event attribute %s", k) + } + attributes[k] = v + } + data, err := json.Marshal(&payload) + if err != nil { + return nil, nil, err + } + return data, attributes, nil +} diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go index 969b910d706d4..6f5184fef7d8e 100644 --- 
a/vendor/github.com/google/btree/btree.go +++ b/vendor/github.com/google/btree/btree.go @@ -479,7 +479,7 @@ func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) child := n.mutableChild(i) // merge with right child mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) + mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow) child.items = append(child.items, mergeItem) child.items = append(child.items, mergeChild.items...) child.children = append(child.children, mergeChild.children...) diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der deleted file mode 100644 index 958f3cfaddf3645fa6c0578b5b6955d65ac4c172..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 998 zcmXqLVt!=M#B^!_GZP~dlZfxTkgMw#Mx5CteAhsdS=K*t&xAY!UN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Gec7&Lt`^z^C)p%6J+ina%mHz5^^vyvNA9?G4eAQ zG%<29H8CnfQWlH+MyjTjz z2Bjk+Ub9TzT(Z$!`+w1f9a{SYn?h^$U$mDJKK^KOurlxGlhf8coXvjW^}bgt-rFjD znHJ|Xo9Wew;}%JlW_Kjsthc+chb^XXpU3LOdy_KCS4OTntgUFd?YsGkV{8gPCrIUU z7xZ1<|6Oe5g44&3J$ODp>G5YKW=00a#V!U;2J*mEAgjzGVIbBZvce&_^W7!+r|T*d zavIkkZ(n>dQ`LY6q(GR3)qt6i@xOr}h$qOxWx&zImXe>Fn2DZTf#J-^uui3sYst!# z{ImS~I{#0uS#WDz%!i-3yIxOGkACKhN$DyM2L{LbrkL@xJCpY3crHP)J~LHEx;nsZlX$z1Na++S~*x&F8L zSHsIX`Pf;-7fz4f$yxWfRV}c$Te2$X#22gkrUlwjtNp9{IiD}~zjo}z)1J=iJoj~z zPFk$n)~eK8B%5+0@w1g$N8H(l+-H4!LOMmyS_+%b`$^TD`}g=z?!9=Gts-l=GE2@y HTP6bly#{pd diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der deleted file mode 100644 index d2817641bafb022339926786ab85b545f40ac665..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1147 zcmXqLVktLhVvb+H%*4pVB*OPMYW498DSV5WgVNVW|Jr8oCgimNFB_*;n@8JsUPeZ4 zRtAH{c0+ChPB!LH7B*p~&|nycgNMo4(NM@h03^fC!x>zfmseSqn3HNKV890w6yxnyM_06xZQcZ?W@M4y!->Y40>Qzg1y0 z&gNQpqF{|c>D+ZZ(rFIKEL_n{%!~|-iyLnkG+s4m+z3nvvdS!tD-9Z#Eo|&v(%3b4 zVzNoZ_g%=<^$R1;>=M3fAjvH2AGv2jo&gWY$-*qG2F#3%{|y8|JV6#N1CA!Pl>FSp z%sk}C2j(GQ()!XwH^ngbB~t_{rh@w&h5|n>_#Tt3odjy zuPCdY_u)fF^#8=jc^^+Q{aqN5`$@Gm({I-q6VU_LtQOD6naL>j(`V|D&;QI6%^$Dc z@$uH$=blqSKTV1FFaEpmVP=>@vvi*Y`l|mqO|ORNA0oIS8nOI z?NiA8%5=ljL@D29-OKpueEYz|zv5rHb554&taa#n7142tpUKWU?R-O_r^0)6j>G3Y aOB;Xxy<$H9NT-Uzt(mu6i*GEf3IzZ)c%0q< diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der deleted file mode 100644 index d8c3710c85f9ff41ddfc709924c866350a727a4f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1013 zcmXqLV*Y5*#B_53GZP~dlZa+?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C z&5N-8^H(H$?psgMz!}^;s& z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) zTR0VF*I2Sr HHQW{e5x{wf diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der deleted file mode 100644 index dae619c097512f20d09d2054c63fc0f715d7be24..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 998 zcmXqLVt!=M#B^!_GZP~dlZZk6mxvepr#-!NVoqOL*`yn*LOZ7#@Un4gwRyCC=VfH% zW@RvFY&YaK;ACSCWnmL$3Jr#FICz+x9SwyH1VA$EJeHTH8JZfJT11KSnjmurkxQExm5_sxk(GhDiIJbd zpox)-sfm%1VO{t`EzVyWX$3+}`kJnrGQY>^=jnZyz4d%;|IQeuB_Gcfo1a{_>B5Fg zXYbI=4K*7QZdjc*51-)~EjRzyir2SPA5Z@zcTLeXV*g^*2f0P(7p>c z?c<>#uh>H(ryKBq6bQ4h8Za|5{x=W=@dR183^XGxLzc8JJvw;mpWjp+5Pu z!*lq#5Sb^i@`a>klJNjzx%iu)2*?h3yy-gEL(;p^g~mZo#n!wzh9Qua5C+wfRd ztarh_l9E{Oro7&WNVTl@3-~vy&EDU;)-f|r-8<&mQ>C=;``&D2+5V0(pW(g3o1}Sj z^gY793Mi&+ntK1`1tm%ENt=U{rb%k1PtbRHEhYTylqR?8HqOeuvA1fBY>pc~NUFTR KZE!sDzAgX``fm0B diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der deleted file mode 100644 index ce7f8d31d6802c7e68c188af8797c3a063894857..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1147 zcmXqLVktLhVvb+H%*4pVB%&$)xidhT`$ARBi)~L`p2_`@+!*`b5acj4ER7|Ts-W~ z`MCv&d6kBO2K*oqZXOPo%(Bel%=|nTUzhK>3dC_Z(le%bCn$9moio9jI@8{ zd;5%c{XT!(ig$;5K+$bpwI6ChVmse+UV1r`!RY5*U#A%ppFJxS>PX#w$sz7*X_R~A zg!|$g&z66XI$)aW-TrxDio@fp!U^sS#+*GdHMB|H?BTf!^g~J_=D$Y-Ot{~xpTg~G+{k6 Zdr{DieczoQDL-a%Ib3(Tsv+bGCjf+Ap-TV& diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der deleted file mode 100644 index 04b0d73600b72f80a03943d41973b279db9e8b32..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1013 zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk zCap+`USuL4V}qf=6kBV_EFAN zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta?pWMd9xVH0Kw4Tf@Xyh=ks1AdSQHxGwPW?5!&W`3Tbn1KjLh?$4C*eEf+B(=CCJ~_3h#8ApW0&YDg zqZoHZWt9UE>m}#s8pw(B8W|aw8XFoJ7#JIxMv3#90J%nHQ0^ddX%nLoaxgNoGB7tW z@-rATF>*0AF)}hdZr#y6M}F(|x(>G^%gcAlKWcTb*S((Se#vIyuMojqJ6T@W9G$-N zZ|C3aC}V-ru$PA$a>||`V5!;WvLO9;#Z4xLTYkS~Q%%)mH+DGbsA<2rr@8F$Psyk! 
zC02_czBy5RY)XCG=Yl1DX=(>VSrdJ-C#|hK7^zn?b35awX)f$Ntvf$$ zcUg9sL1fPSDbXqWt6$VVZu~MYZrj`2y}fI?80UuXsujC8ao09pzReSEFH3&Hjk;!&j?yPw^7|NHlfMLseyY=TyHB}&&4U!T;kt&%Ic z-@@f=`1w!pGyVU%zJI$PWGYXZ;?Bg($iTQb*dWkA7MKcT`B=nQL{>NicfPwM|8!l2 zLQdoQtUFt($qvv(cdI=i6tz3tUW`|a0C z&5N-8^H(H$?psgMz!}^;s& z=?+d?)Hkg%emvc~QLj+DwRvTMu)Tl4_m}{_7kTC1{xNsF-p9krsM-B)Ge_#Kiu#~{ zd67^5U(N6lVcaug+5}Tw2V+f_-BCNQyG;7WAZ_<^`6QjCrOhu7&ztq4tdo7t^%&>) zTR0VF*I2Sr HHQW{e5x{wf diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem deleted file mode 100644 index 493a5a2648101..0000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL -BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 -YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE -AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN -MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ -BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx -ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ -KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 -a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 -OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 -RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK -P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 -HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu -0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 -EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 -/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA -QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ -nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD -X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco -pKklVz0= ------END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem deleted file mode 100644 index 55a7f10c742db..0000000000000 --- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF -l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj -+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G -4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA -xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh -68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ -/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL -Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA -VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 -9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH -MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt -aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq -xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx 
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv -EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z -aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq -udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs -VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm -56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT -GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V -Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm -HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q -BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH -qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh -GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= ------END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der deleted file mode 100644 index 04b0d73600b72f80a03943d41973b279db9e8b32..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1013 zcmXqLV*Y5*#B_53GZP~dlZb{E^N!s~snriRh?%)HJiGKl>CAisUN%mxHjlRNyo`+8 ztPBQ??S|Y2oNUaYENsF|p}{Z?2M?38qoI(207!`V#>K<# zoS$2em{(~iXuuB=;pX9R$t=q(&dkp<6f+P32{H5V78@nTm!uY##3!c~l^9AHNWiV< zWEA7BsH}1TV!h=2Tmw0AULzv|Q)5FT0|N^q(aL-(RkWjuO*nCZC(^b4{cp%V+e53*PfoZW)2DU5BJN^Jypf0+ z%dyqB7eAAEDfH_&Z*6C3aP8^9M)sYbgC^`~v;4F5#oepTLYl5+2bdY(i!-?My>4G) z&8u+L`?_9LxQB1$HP?fpd*5i(-|SJJ!`sZHa8UZu(QeP3Dc(Yr8?G#iY20z>);Fh$ zMQ>DZHkC%KUBdEb&cYv;1sHCx@Vv~#%*epFIM^W2Ko*z^WcgUcSVVXxICA%7?w=9J zwOIRjXviz}kjUu<@*rtt76}8f2J8wz2}@R(k?}tZs{u2RLJoFd>IDWnBZI}4y?mP{ zW+WALO{`uTY0T-O8uy=9`&z_;*e^$wYu3!Wz-Re-&a7vV%g_8h++q8CMf&zxTFdl~ zp5i+{-R9L&+mdUFEBoKr@u=5M->UQ>MNWJ7BRi)4^M4~B*uKhe`J%yAbG&-Thr?bk zCap+`USuL4V}qf=6kBV_EFAN zmIn*31Qb`P{oxd|=2o5l$|&AdTJ_-*k&5oDQvTTOiN8OjczyHyta /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: verify +verify: sec govulncheck lint test + +.PHONY: lint +lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint #####" + golangci-lint run -v + +.PHONY: sec +sec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec #####" + gosec ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck #####" + govulncheck ./... + +.PHONY: test +test: + @echo "##### Running tests #####" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... 
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md new file mode 100644 index 0000000000000..02555b2642c5f --- /dev/null +++ b/vendor/github.com/gorilla/handlers/README.md @@ -0,0 +1,56 @@ +# gorilla/handlers + +![Testing](https://github.com/gorilla/handlers/actions/workflows/test.yml/badge.svg) +[![Codecov](https://codecov.io/github/gorilla/handlers/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/handlers) +[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge) + +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's `net/http` package (or any framework supporting `http.Handler`), including: + +* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log + Format](http://httpd.apache.org/docs/2.2/logs.html#common). +* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log + Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by + both Apache and nginx. +* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses. +* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted + content types. +* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a + `map[string]http.Handler` +* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the + `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` + headers when running a Go server behind a HTTP reverse proxy. +* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple + domains (i.e. multiple CNAME aliases). +* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics. + +Other handlers are documented [on the Gorilla +website](https://www.gorillatoolkit.org/pkg/handlers). + +## Example + +A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: + +```go +import ( + "net/http" + "github.com/gorilla/handlers" +) + +func main() { + r := http.NewServeMux() + + // Only log requests to our admin dashboard to stdout + r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) + r.HandleFunc("/", ShowIndex) + + // Wrap our server with our gzip handler to gzip compress all responses. + http.ListenAndServe(":8000", handlers.CompressHandler(r)) +} +``` + +## License + +BSD licensed. See the included LICENSE file for details. 
+ diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go new file mode 100644 index 0000000000000..7121f5307bec9 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/canonical.go @@ -0,0 +1,73 @@ +package handlers + +import ( + "net/http" + "net/url" + "strings" +) + +type canonical struct { + h http.Handler + domain string + code int +} + +// CanonicalHost is HTTP middleware that re-directs requests to the canonical +// domain. It accepts a domain and a status code (e.g. 301 or 302) and +// re-directs clients to this domain. The existing request path is maintained. +// +// Note: If the provided domain is considered invalid by url.Parse or otherwise +// returns an empty scheme or host, clients are not re-directed. +// +// Example: +// +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) +// +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { + fn := func(h http.Handler) http.Handler { + return canonical{h, domain, code} + } + + return fn +} + +func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) { + dest, err := url.Parse(c.domain) + if err != nil { + // Call the next handler if the provided domain fails to parse. + c.h.ServeHTTP(w, r) + return + } + + if dest.Scheme == "" || dest.Host == "" { + // Call the next handler if the scheme or host are empty. + // Note that url.Parse won't fail on in this case. + c.h.ServeHTTP(w, r) + return + } + + if !strings.EqualFold(cleanHost(r.Host), dest.Host) { + // Re-build the destination URL + dest := dest.Scheme + "://" + dest.Host + r.URL.Path + if r.URL.RawQuery != "" { + dest += "?" + r.URL.RawQuery + } + http.Redirect(w, r, dest, c.code) + return + } + + c.h.ServeHTTP(w, r) +} + +// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '. +// This is backported from Go 1.5 (in response to issue #11206) and attempts to +// mitigate malformed Host headers that do not match the format in RFC7230. +func cleanHost(in string) string { + if i := strings.IndexAny(in, " /"); i != -1 { + return in[:i] + } + return in +} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go new file mode 100644 index 0000000000000..d6f589503b5ea --- /dev/null +++ b/vendor/github.com/gorilla/handlers/compress.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package handlers + +import ( + "compress/flate" + "compress/gzip" + "io" + "net/http" + "strings" + + "github.com/felixge/httpsnoop" +) + +const acceptEncoding string = "Accept-Encoding" + +type compressResponseWriter struct { + compressor io.Writer + w http.ResponseWriter +} + +func (cw *compressResponseWriter) WriteHeader(c int) { + cw.w.Header().Del("Content-Length") + cw.w.WriteHeader(c) +} + +func (cw *compressResponseWriter) Write(b []byte) (int, error) { + h := cw.w.Header() + if h.Get("Content-Type") == "" { + h.Set("Content-Type", http.DetectContentType(b)) + } + h.Del("Content-Length") + + return cw.compressor.Write(b) +} + +func (cw *compressResponseWriter) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(cw.compressor, r) +} + +type flusher interface { + Flush() error +} + +func (cw *compressResponseWriter) Flush() { + // Flush compressed data if compressor supports it. + if f, ok := cw.compressor.(flusher); ok { + _ = f.Flush() + } + // Flush HTTP response. + if f, ok := cw.w.(http.Flusher); ok { + f.Flush() + } +} + +// CompressHandler gzip compresses HTTP responses for clients that support it +// via the 'Accept-Encoding' header. +// +// Compressing TLS traffic may leak the page contents to an attacker if the +// page contains user input: http://security.stackexchange.com/a/102015/12208 +func CompressHandler(h http.Handler) http.Handler { + return CompressHandlerLevel(h, gzip.DefaultCompression) +} + +// CompressHandlerLevel gzip compresses HTTP responses with specified compression level +// for clients that support it via the 'Accept-Encoding' header. +// +// The compression level should be gzip.DefaultCompression, gzip.NoCompression, +// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive. +// gzip.DefaultCompression is used in case of invalid compression level. 
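+//
+// A small usage sketch (the mux value and listen address are placeholders):
+//
+//	mux := http.NewServeMux()
+//	http.ListenAndServe(":8000", handlers.CompressHandlerLevel(mux, gzip.BestSpeed))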
+func CompressHandlerLevel(h http.Handler, level int) http.Handler { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + level = gzip.DefaultCompression + } + + const ( + gzipEncoding = "gzip" + flateEncoding = "deflate" + ) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // detect what encoding to use + var encoding string + for _, curEnc := range strings.Split(r.Header.Get(acceptEncoding), ",") { + curEnc = strings.TrimSpace(curEnc) + if curEnc == gzipEncoding || curEnc == flateEncoding { + encoding = curEnc + break + } + } + + // always add Accept-Encoding to Vary to prevent intermediate caches corruption + w.Header().Add("Vary", acceptEncoding) + + // if we weren't able to identify an encoding we're familiar with, pass on the + // request to the handler and return + if encoding == "" { + h.ServeHTTP(w, r) + return + } + + if r.Header.Get("Upgrade") != "" { + h.ServeHTTP(w, r) + return + } + + // wrap the ResponseWriter with the writer for the chosen encoding + var encWriter io.WriteCloser + if encoding == gzipEncoding { + encWriter, _ = gzip.NewWriterLevel(w, level) + } else if encoding == flateEncoding { + encWriter, _ = flate.NewWriter(w, level) + } + defer encWriter.Close() + + w.Header().Set("Content-Encoding", encoding) + r.Header.Del(acceptEncoding) + + cw := &compressResponseWriter{ + w: w, + compressor: encWriter, + } + + w = httpsnoop.Wrap(w, httpsnoop.Hooks{ + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return cw.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return cw.WriteHeader + }, + Flush: func(httpsnoop.FlushFunc) httpsnoop.FlushFunc { + return cw.Flush + }, + ReadFrom: func(rff httpsnoop.ReadFromFunc) httpsnoop.ReadFromFunc { + return cw.ReadFrom + }, + }) + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go new file mode 100644 index 0000000000000..8af9c096e5e40 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/cors.go @@ -0,0 +1,352 @@ +package handlers + +import ( + "net/http" + "strconv" + "strings" +) + +// CORSOption represents a functional option for configuring the CORS middleware. +type CORSOption func(*cors) error + +type cors struct { + h http.Handler + allowedHeaders []string + allowedMethods []string + allowedOrigins []string + allowedOriginValidator OriginValidator + exposedHeaders []string + maxAge int + ignoreOptions bool + allowCredentials bool + optionStatusCode int +} + +// OriginValidator takes an origin string and returns whether or not that origin is allowed. +type OriginValidator func(string) bool + +var ( + defaultCorsOptionStatusCode = http.StatusOK + defaultCorsMethods = []string{http.MethodGet, http.MethodHead, http.MethodPost} + defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} + // (WebKit/Safari v9 sends the Origin header by default in AJAX requests). 
+) + +const ( + corsOptionMethod string = http.MethodOptions + corsAllowOriginHeader string = "Access-Control-Allow-Origin" + corsExposeHeadersHeader string = "Access-Control-Expose-Headers" + corsMaxAgeHeader string = "Access-Control-Max-Age" + corsAllowMethodsHeader string = "Access-Control-Allow-Methods" + corsAllowHeadersHeader string = "Access-Control-Allow-Headers" + corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials" + corsRequestMethodHeader string = "Access-Control-Request-Method" + corsRequestHeadersHeader string = "Access-Control-Request-Headers" + corsOriginHeader string = "Origin" + corsVaryHeader string = "Vary" + corsOriginMatchAll string = "*" +) + +func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { + origin := r.Header.Get(corsOriginHeader) + if !ch.isOriginAllowed(origin) { + if r.Method != corsOptionMethod || ch.ignoreOptions { + ch.h.ServeHTTP(w, r) + } + + return + } + + if r.Method == corsOptionMethod { + if ch.ignoreOptions { + ch.h.ServeHTTP(w, r) + return + } + + if _, ok := r.Header[corsRequestMethodHeader]; !ok { + w.WriteHeader(http.StatusBadRequest) + return + } + + method := r.Header.Get(corsRequestMethodHeader) + if !ch.isMatch(method, ch.allowedMethods) { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",") + allowedHeaders := []string{} + for _, v := range requestHeaders { + canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) { + continue + } + + if !ch.isMatch(canonicalHeader, ch.allowedHeaders) { + w.WriteHeader(http.StatusForbidden) + return + } + + allowedHeaders = append(allowedHeaders, canonicalHeader) + } + + if len(allowedHeaders) > 0 { + w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ",")) + } + + if ch.maxAge > 0 { + w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge)) + } + + if !ch.isMatch(method, defaultCorsMethods) { + w.Header().Set(corsAllowMethodsHeader, method) + } + } else if len(ch.exposedHeaders) > 0 { + w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) + } + + if ch.allowCredentials { + w.Header().Set(corsAllowCredentialsHeader, "true") + } + + if len(ch.allowedOrigins) > 1 { + w.Header().Set(corsVaryHeader, corsOriginHeader) + } + + returnOrigin := origin + if ch.allowedOriginValidator == nil && len(ch.allowedOrigins) == 0 { + returnOrigin = "*" + } else { + for _, o := range ch.allowedOrigins { + // A configuration of * is different than explicitly setting an allowed + // origin. Returning arbitrary origin headers in an access control allow + // origin header is unsafe and is not required by any use case. + if o == corsOriginMatchAll { + returnOrigin = "*" + break + } + } + } + w.Header().Set(corsAllowOriginHeader, returnOrigin) + + if r.Method == corsOptionMethod { + w.WriteHeader(ch.optionStatusCode) + return + } + ch.h.ServeHTTP(w, r) +} + +// CORS provides Cross-Origin Resource Sharing middleware. +// Example: +// +// import ( +// "net/http" +// +// "github.com/gorilla/handlers" +// "github.com/gorilla/mux" +// ) +// +// func main() { +// r := mux.NewRouter() +// r.HandleFunc("/users", UserEndpoint) +// r.HandleFunc("/projects", ProjectEndpoint) +// +// // Apply the CORS middleware to our top-level router, with the defaults. 
+// http.ListenAndServe(":8000", handlers.CORS()(r)) +// } +func CORS(opts ...CORSOption) func(http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + ch := parseCORSOptions(opts...) + ch.h = h + return ch + } +} + +func parseCORSOptions(opts ...CORSOption) *cors { + ch := &cors{ + allowedMethods: defaultCorsMethods, + allowedHeaders: defaultCorsHeaders, + allowedOrigins: []string{}, + optionStatusCode: defaultCorsOptionStatusCode, + } + + for _, option := range opts { + _ = option(ch) //TODO: @bharat-rajani, return error to caller if not nil? + } + + return ch +} + +// +// Functional options for configuring CORS. +// + +// AllowedHeaders adds the provided headers to the list of allowed headers in a +// CORS request. +// This is an append operation so the headers Accept, Accept-Language, +// and Content-Language are always allowed. +// Content-Type must be explicitly declared if accepting Content-Types other than +// application/x-www-form-urlencoded, multipart/form-data, or text/plain. +func AllowedHeaders(headers []string) CORSOption { + return func(ch *cors) error { + for _, v := range headers { + normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if normalizedHeader == "" { + continue + } + + if !ch.isMatch(normalizedHeader, ch.allowedHeaders) { + ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader) + } + } + + return nil + } +} + +// AllowedMethods can be used to explicitly allow methods in the +// Access-Control-Allow-Methods header. +// This is a replacement operation so you must also +// pass GET, HEAD, and POST if you wish to support those methods. +func AllowedMethods(methods []string) CORSOption { + return func(ch *cors) error { + ch.allowedMethods = []string{} + for _, v := range methods { + normalizedMethod := strings.ToUpper(strings.TrimSpace(v)) + if normalizedMethod == "" { + continue + } + + if !ch.isMatch(normalizedMethod, ch.allowedMethods) { + ch.allowedMethods = append(ch.allowedMethods, normalizedMethod) + } + } + + return nil + } +} + +// AllowedOrigins sets the allowed origins for CORS requests, as used in the +// 'Allow-Access-Control-Origin' HTTP header. +// Note: Passing in a []string{"*"} will allow any domain. +func AllowedOrigins(origins []string) CORSOption { + return func(ch *cors) error { + for _, v := range origins { + if v == corsOriginMatchAll { + ch.allowedOrigins = []string{corsOriginMatchAll} + return nil + } + } + + ch.allowedOrigins = origins + return nil + } +} + +// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the +// 'Allow-Access-Control-Origin' HTTP header. +func AllowedOriginValidator(fn OriginValidator) CORSOption { + return func(ch *cors) error { + ch.allowedOriginValidator = fn + return nil + } +} + +// OptionStatusCode sets a custom status code on the OPTIONS requests. +// Default behaviour sets it to 200 to reflect best practices. This is option is not mandatory +// and can be used if you need a custom status code (i.e 204). +// +// More informations on the spec: +// https://fetch.spec.whatwg.org/#cors-preflight-fetch +func OptionStatusCode(code int) CORSOption { + return func(ch *cors) error { + ch.optionStatusCode = code + return nil + } +} + +// ExposedHeaders can be used to specify headers that are available +// and will not be stripped out by the user-agent. 
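+//
+// For example, to let browser clients read a custom header (the header name
+// here is only illustrative):
+//
+//	handlers.CORS(handlers.ExposedHeaders([]string{"X-Request-Id"}))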
+func ExposedHeaders(headers []string) CORSOption { + return func(ch *cors) error { + ch.exposedHeaders = []string{} + for _, v := range headers { + normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if normalizedHeader == "" { + continue + } + + if !ch.isMatch(normalizedHeader, ch.exposedHeaders) { + ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader) + } + } + + return nil + } +} + +// MaxAge determines the maximum age (in seconds) between preflight requests. A +// maximum of 10 minutes is allowed. An age above this value will default to 10 +// minutes. +func MaxAge(age int) CORSOption { + return func(ch *cors) error { + // Maximum of 10 minutes. + if age > 600 { + age = 600 + } + + ch.maxAge = age + return nil + } +} + +// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead +// passing them through to the next handler. This is useful when your application +// or framework has a pre-existing mechanism for responding to OPTIONS requests. +func IgnoreOptions() CORSOption { + return func(ch *cors) error { + ch.ignoreOptions = true + return nil + } +} + +// AllowCredentials can be used to specify that the user agent may pass +// authentication details along with the request. +func AllowCredentials() CORSOption { + return func(ch *cors) error { + ch.allowCredentials = true + return nil + } +} + +func (ch *cors) isOriginAllowed(origin string) bool { + if origin == "" { + return false + } + + if ch.allowedOriginValidator != nil { + return ch.allowedOriginValidator(origin) + } + + if len(ch.allowedOrigins) == 0 { + return true + } + + for _, allowedOrigin := range ch.allowedOrigins { + if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll { + return true + } + } + + return false +} + +func (ch *cors) isMatch(needle string, haystack []string) bool { + for _, v := range haystack { + if v == needle { + return true + } + } + + return false +} diff --git a/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/gorilla/handlers/doc.go new file mode 100644 index 0000000000000..944e5a8ae9982 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/doc.go @@ -0,0 +1,9 @@ +/* +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's net/http package (or any framework supporting http.Handler). + +The package includes handlers for logging in standardised formats, compressing +HTTP responses, validating content types and other useful tools for manipulating +requests and responses. +*/ +package handlers diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go new file mode 100644 index 0000000000000..9b92fce3333e7 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/handlers.go @@ -0,0 +1,150 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package handlers + +import ( + "bufio" + "fmt" + "net" + "net/http" + "sort" + "strings" +) + +// MethodHandler is an http.Handler that dispatches to a handler whose key in the +// MethodHandler's map matches the name of the HTTP request's method, eg: GET +// +// If the request's method is OPTIONS and OPTIONS is not a key in the map then +// the handler responds with a status of 200 and sets the Allow header to a +// comma-separated list of available methods. 
+// +// If the request's method doesn't match any of its keys the handler responds +// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a +// comma-separated list of available methods. +type MethodHandler map[string]http.Handler + +func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if handler, ok := h[req.Method]; ok { + handler.ServeHTTP(w, req) + } else { + allow := []string{} + for k := range h { + allow = append(allow, k) + } + sort.Strings(allow) + w.Header().Set("Allow", strings.Join(allow, ", ")) + if req.Method == http.MethodOptions { + w.WriteHeader(http.StatusOK) + } else { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + } +} + +// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP +// status code and body size. +type responseLogger struct { + w http.ResponseWriter + status int + size int +} + +func (l *responseLogger) Write(b []byte) (int, error) { + size, err := l.w.Write(b) + l.size += size + return size, err +} + +func (l *responseLogger) WriteHeader(s int) { + l.w.WriteHeader(s) + l.status = s +} + +func (l *responseLogger) Status() int { + return l.status +} + +func (l *responseLogger) Size() int { + return l.size +} + +func (l *responseLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { + conn, rw, err := l.w.(http.Hijacker).Hijack() + if err == nil && l.status == 0 { + // The status will be StatusSwitchingProtocols if there was no error and + // WriteHeader has not been called yet + l.status = http.StatusSwitchingProtocols + } + return conn, rw, err +} + +// isContentType validates the Content-Type header matches the supplied +// contentType. That is, its type and subtype match. +func isContentType(h http.Header, contentType string) bool { + ct := h.Get("Content-Type") + if i := strings.IndexRune(ct, ';'); i != -1 { + ct = ct[0:i] + } + return ct == contentType +} + +// ContentTypeHandler wraps and returns a http.Handler, validating the request +// content type is compatible with the contentTypes list. It writes a HTTP 415 +// error if that fails. +// +// Only PUT, POST, and PATCH requests are considered. +func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !(r.Method == http.MethodPut || r.Method == http.MethodPost || r.Method == http.MethodPatch) { + h.ServeHTTP(w, r) + return + } + + for _, ct := range contentTypes { + if isContentType(r.Header, ct) { + h.ServeHTTP(w, r) + return + } + } + http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", + r.Header.Get("Content-Type"), + contentTypes), + http.StatusUnsupportedMediaType) + }) +} + +const ( + // HTTPMethodOverrideHeader is a commonly used + // http header to override a request method. + HTTPMethodOverrideHeader = "X-HTTP-Method-Override" + // HTTPMethodOverrideFormKey is a commonly used + // HTML form key to override a request method. + HTTPMethodOverrideFormKey = "_method" +) + +// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for +// the X-HTTP-Method-Override header or the _method form key, and overrides (if +// valid) request.Method with its value. +// +// This is especially useful for HTTP clients that don't support many http verbs. +// It isn't secure to override e.g a GET to a POST, so only POST requests are +// considered. Likewise, the override method can only be a "write" method: PUT, +// PATCH or DELETE. 
+// +// Form method takes precedence over header method. +func HTTPMethodOverrideHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPost { + om := r.FormValue(HTTPMethodOverrideFormKey) + if om == "" { + om = r.Header.Get(HTTPMethodOverrideHeader) + } + if om == http.MethodPut || om == http.MethodPatch || om == http.MethodDelete { + r.Method = om + } + } + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/gorilla/handlers/logging.go b/vendor/github.com/gorilla/handlers/logging.go new file mode 100644 index 0000000000000..2badb6fbff844 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/logging.go @@ -0,0 +1,246 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package handlers + +import ( + "io" + "net" + "net/http" + "net/url" + "strconv" + "time" + "unicode/utf8" + + "github.com/felixge/httpsnoop" +) + +// Logging + +// LogFormatterParams is the structure any formatter will be handed when time to log comes. +type LogFormatterParams struct { + Request *http.Request + URL url.URL + TimeStamp time.Time + StatusCode int + Size int +} + +// LogFormatter gives the signature of the formatter function passed to CustomLoggingHandler. +type LogFormatter func(writer io.Writer, params LogFormatterParams) + +// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its +// friends + +type loggingHandler struct { + writer io.Writer + handler http.Handler + formatter LogFormatter +} + +func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + t := time.Now() + logger, w := makeLogger(w) + url := *req.URL + + h.handler.ServeHTTP(w, req) + if req.MultipartForm != nil { + err := req.MultipartForm.RemoveAll() + if err != nil { + return + } + } + + params := LogFormatterParams{ + Request: req, + URL: url, + TimeStamp: t, + StatusCode: logger.Status(), + Size: logger.Size(), + } + + h.formatter(h.writer, params) +} + +func makeLogger(w http.ResponseWriter) (*responseLogger, http.ResponseWriter) { + logger := &responseLogger{w: w, status: http.StatusOK} + return logger, httpsnoop.Wrap(w, httpsnoop.Hooks{ + Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc { + return logger.Write + }, + WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc { + return logger.WriteHeader + }, + }) +} + +const lowerhex = "0123456789abcdef" + +func appendQuoted(buf []byte, s string) []byte { + var runeTmp [utf8.UTFMax]byte + for width := 0; len(s) > 0; s = s[width:] { //nolint: wastedassign //TODO: why width starts from 0and reassigned as 1 + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + continue + } + if r == rune('"') || r == '\\' { // always backslashed + buf = append(buf, '\\') + buf = append(buf, byte(r)) + continue + } + if strconv.IsPrint(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + continue + } + switch r { + case '\a': + buf = append(buf, `\a`...) + case '\b': + buf = append(buf, `\b`...) + case '\f': + buf = append(buf, `\f`...) + case '\n': + buf = append(buf, `\n`...) + case '\r': + buf = append(buf, `\r`...) + case '\t': + buf = append(buf, `\t`...) + case '\v': + buf = append(buf, `\v`...) 
+ default: + switch { + case r < ' ': + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, `\u`...) + for s := 12; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, `\U`...) + for s := 28; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + } + } + } + return buf +} + +// buildCommonLogLine builds a log entry for req in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { + username := "-" + if url.User != nil { + if name := url.User.Username(); name != "" { + username = name + } + } + + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + + uri := req.RequestURI + + // Requests using the CONNECT method over HTTP/2.0 must use + // the authority field (aka r.Host) to identify the target. + // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT + if req.ProtoMajor == 2 && req.Method == "CONNECT" { + uri = req.Host + } + if uri == "" { + uri = url.RequestURI() + } + + buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) + buf = append(buf, host...) + buf = append(buf, " - "...) + buf = append(buf, username...) + buf = append(buf, " ["...) + buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) + buf = append(buf, `] "`...) + buf = append(buf, req.Method...) + buf = append(buf, " "...) + buf = appendQuoted(buf, uri) + buf = append(buf, " "...) + buf = append(buf, req.Proto...) + buf = append(buf, `" `...) + buf = append(buf, strconv.Itoa(status)...) + buf = append(buf, " "...) + buf = append(buf, strconv.Itoa(size)...) + return buf +} + +// writeLog writes a log entry for req to w in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func writeLog(writer io.Writer, params LogFormatterParams) { + buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) + buf = append(buf, '\n') + _, _ = writer.Write(buf) +} + +// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func writeCombinedLog(writer io.Writer, params LogFormatterParams) { + buf := buildCommonLogLine(params.Request, params.URL, params.TimeStamp, params.StatusCode, params.Size) + buf = append(buf, ` "`...) + buf = appendQuoted(buf, params.Request.Referer()) + buf = append(buf, `" "`...) + buf = appendQuoted(buf, params.Request.UserAgent()) + buf = append(buf, '"', '\n') + _, _ = writer.Write(buf) +} + +// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Combined Log Format. +// +// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. +// +// LoggingHandler always sets the ident field of the log to -. 
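+//
+// Usage mirrors LoggingHandler; os.Stdout and the router r are placeholders:
+//
+//	http.ListenAndServe(":1123", handlers.CombinedLoggingHandler(os.Stdout, r))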
+func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { + return loggingHandler{out, h, writeCombinedLog} +} + +// LoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Common Log Format (CLF). +// +// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. +// +// LoggingHandler always sets the ident field of the log to - +// +// Example: +// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("This is a catch-all route")) +// }) +// loggedRouter := handlers.LoggingHandler(os.Stdout, r) +// http.ListenAndServe(":1123", loggedRouter) +func LoggingHandler(out io.Writer, h http.Handler) http.Handler { + return loggingHandler{out, h, writeLog} +} + +// CustomLoggingHandler provides a way to supply a custom log formatter +// while taking advantage of the mechanisms in this package. +func CustomLoggingHandler(out io.Writer, h http.Handler, f LogFormatter) http.Handler { + return loggingHandler{out, h, f} +} diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go new file mode 100644 index 0000000000000..281d753e95a28 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -0,0 +1,120 @@ +package handlers + +import ( + "net/http" + "regexp" + "strings" +) + +var ( + // De-facto standard header keys. + xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") + xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host") + xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto") + xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme") + xRealIP = http.CanonicalHeaderKey("X-Real-IP") +) + +var ( + // RFC7239 defines a new "Forwarded: " header designed to replace the + // existing use of X-Forwarded-* headers. + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43. + forwarded = http.CanonicalHeaderKey("Forwarded") + // Allows for a sub-match of the first value after 'for=' to the next + // comma, semi-colon or space. The match is case-insensitive. + forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`) + // Allows for a sub-match for the first instance of scheme (http|https) + // prefixed by 'proto='. The match is case-insensitive. + protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`) +) + +// ProxyHeaders inspects common reverse proxy headers and sets the corresponding +// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP +// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme +// for the scheme (http|https), X-Forwarded-Host for the host and the RFC7239 +// Forwarded header, which may include both client IPs and schemes. +// +// NOTE: This middleware should only be used when behind a reverse +// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are +// configured not to) strip these headers from client requests, or where these +// headers are accepted "as is" from a remote client (e.g. when Go is not behind +// a proxy), can manifest as a vulnerability if your application uses these +// headers for validating the 'trustworthiness' of a request. +func ProxyHeaders(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + // Set the remote IP with the value passed from the proxy. + if fwd := getIP(r); fwd != "" { + r.RemoteAddr = fwd + } + + // Set the scheme (proto) with the value passed from the proxy. 
+ if scheme := getScheme(r); scheme != "" { + r.URL.Scheme = scheme + } + // Set the host with the value passed by the proxy + if r.Header.Get(xForwardedHost) != "" { + r.Host = r.Header.Get(xForwardedHost) + } + // Call the next handler in the chain. + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 +// Forwarded headers (in that order). +func getIP(r *http.Request) string { + var addr string + + switch { + case r.Header.Get(xForwardedFor) != "": + fwd := r.Header.Get(xForwardedFor) + // Only grab the first (client) address. Note that '192.168.0.1, + // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after + // the first may represent forwarding proxies earlier in the chain. + s := strings.Index(fwd, ", ") + if s == -1 { + s = len(fwd) + } + addr = fwd[:s] + case r.Header.Get(xRealIP) != "": + addr = r.Header.Get(xRealIP) + case r.Header.Get(forwarded) != "": + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'for=' capture, which we ignore. In the case of multiple IP + // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only + // extract the first, which should be the client IP. + if match := forRegex.FindStringSubmatch(r.Header.Get(forwarded)); len(match) > 1 { + // IPv6 addresses in Forwarded headers are quoted-strings. We strip + // these quotes. + addr = strings.Trim(match[1], `"`) + } + } + + return addr +} + +// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 +// Forwarded headers (in that order). +func getScheme(r *http.Request) string { + var scheme string + + // Retrieve the scheme from X-Forwarded-Proto. + if proto := r.Header.Get(xForwardedProto); proto != "" { + scheme = strings.ToLower(proto) + } else if proto = r.Header.Get(xForwardedScheme); proto != "" { + scheme = strings.ToLower(proto) + } else if proto = r.Header.Get(forwarded); proto != "" { + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'proto=' capture, which we ignore. In the case of multiple proto + // parameters (invalid) we only extract the first. + if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 { + scheme = strings.ToLower(match[1]) + } + } + + return scheme +} diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go new file mode 100644 index 0000000000000..0d4f955ecbda0 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/recovery.go @@ -0,0 +1,98 @@ +package handlers + +import ( + "log" + "net/http" + "runtime/debug" +) + +// RecoveryHandlerLogger is an interface used by the recovering handler to print logs. +type RecoveryHandlerLogger interface { + Println(...interface{}) +} + +type recoveryHandler struct { + handler http.Handler + logger RecoveryHandlerLogger + printStack bool +} + +// RecoveryOption provides a functional approach to define +// configuration for a handler; such as setting the logging +// whether or not to print stack traces on panic. +type RecoveryOption func(http.Handler) + +func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { + for _, option := range opts { + option(h) + } + + return h +} + +// RecoveryHandler is HTTP middleware that recovers from a panic, +// logs the panic, writes http.StatusInternalServerError, and +// continues to the next handler. 
+// +// Example: +// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// panic("Unexpected error!") +// }) +// +// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) +func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + r := &recoveryHandler{handler: h} + return parseRecoveryOptions(r, opts...) + } +} + +// RecoveryLogger is a functional option to override +// the default logger. +func RecoveryLogger(logger RecoveryHandlerLogger) RecoveryOption { + return func(h http.Handler) { + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.logger = logger + } +} + +// PrintRecoveryStack is a functional option to enable +// or disable printing stack traces on panic. +func PrintRecoveryStack(shouldPrint bool) RecoveryOption { + return func(h http.Handler) { + r := h.(*recoveryHandler) //nolint:errcheck //TODO: + // @bharat-rajani should return type-assertion error but would break the API? + r.printStack = shouldPrint + } +} + +func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + defer func() { + if err := recover(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + h.log(err) + } + }() + + h.handler.ServeHTTP(w, req) +} + +func (h recoveryHandler) log(v ...interface{}) { + if h.logger != nil { + h.logger.Println(v...) + } else { + log.Println(v...) + } + + if h.printStack { + stack := string(debug.Stack()) + if h.logger != nil { + h.logger.Println(stack) + } else { + log.Println(stack) + } + } +} diff --git a/vendor/github.com/pkg/xattr/.gitignore b/vendor/github.com/pkg/xattr/.gitignore new file mode 100644 index 0000000000000..d8b32652e5a92 --- /dev/null +++ b/vendor/github.com/pkg/xattr/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test + +*.swp diff --git a/vendor/github.com/pkg/xattr/LICENSE b/vendor/github.com/pkg/xattr/LICENSE new file mode 100644 index 0000000000000..99d2e9dc8ff27 --- /dev/null +++ b/vendor/github.com/pkg/xattr/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012 Dave Cheney. All rights reserved. +Copyright (c) 2014 Kuba Podgórski. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/xattr/README.md b/vendor/github.com/pkg/xattr/README.md new file mode 100644 index 0000000000000..0662c0208c572 --- /dev/null +++ b/vendor/github.com/pkg/xattr/README.md @@ -0,0 +1,45 @@ +[![GoDoc](https://godoc.org/github.com/pkg/xattr?status.svg)](http://godoc.org/github.com/pkg/xattr) +[![Go Report Card](https://goreportcard.com/badge/github.com/pkg/xattr)](https://goreportcard.com/report/github.com/pkg/xattr) +[![Build Status](https://github.com/pkg/xattr/workflows/build/badge.svg)](https://github.com/pkg/xattr/actions?query=workflow%3Abuild) +[![Codecov](https://codecov.io/gh/pkg/xattr/branch/master/graph/badge.svg)](https://codecov.io/gh/pkg/xattr) + +xattr +===== +Extended attribute support for Go (linux + darwin + freebsd + netbsd + solaris). + +"Extended attributes are name:value pairs associated permanently with files and directories, similar to the environment strings associated with a process. An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty." [See more...](https://en.wikipedia.org/wiki/Extended_file_attributes) + +`SetWithFlags` allows to additionally pass system flags to be forwarded to the underlying calls. FreeBSD and NetBSD do not support this and the parameter will be ignored. + +The `L` variants of all functions (`LGet/LSet/...`) are identical to `Get/Set/...` except that they +do not reference a symlink that appears at the end of a path. See +[GoDoc](http://godoc.org/github.com/pkg/xattr) for details. + +### Example +```go + const path = "/tmp/myfile" + const prefix = "user." + + if err := xattr.Set(path, prefix+"test", []byte("test-attr-value")); err != nil { + log.Fatal(err) + } + + var list []string + if list, err = xattr.List(path); err != nil { + log.Fatal(err) + } + + var data []byte + if data, err = xattr.Get(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + if err = xattr.Remove(path, prefix+"test"); err != nil { + log.Fatal(err) + } + + // One can also specify the flags parameter to be passed to the OS. + if err := xattr.SetWithFlags(path, prefix+"test", []byte("test-attr-value"), xattr.XATTR_CREATE); err != nil { + log.Fatal(err) + } +``` diff --git a/vendor/github.com/pkg/xattr/xattr.go b/vendor/github.com/pkg/xattr/xattr.go new file mode 100644 index 0000000000000..e34e274d51373 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr.go @@ -0,0 +1,258 @@ +/* +Package xattr provides support for extended attributes on linux, darwin and freebsd. +Extended attributes are name:value pairs associated permanently with files and directories, +similar to the environment strings associated with a process. +An attribute may be defined or undefined. If it is defined, its value may be empty or non-empty. +More details you can find here: https://en.wikipedia.org/wiki/Extended_file_attributes . + +All functions are provided in triples: Get/LGet/FGet, Set/LSet/FSet etc. 
The "L" +variant will not follow a symlink at the end of the path, and "F" variant accepts +a file descriptor instead of a path. + +Example for "L" variant, assuming path is "/symlink1/symlink2", where both components are +symlinks: +Get will follow "symlink1" and "symlink2" and operate on the target of +"symlink2". LGet will follow "symlink1" but operate directly on "symlink2". +*/ +package xattr + +import ( + "os" + "syscall" +) + +// Error records an error and the operation, file path and attribute that caused it. +type Error struct { + Op string + Path string + Name string + Err error +} + +func (e *Error) Unwrap() error { return e.Err } + +func (e *Error) Error() (errstr string) { + if e.Op != "" { + errstr += e.Op + } + if e.Path != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Path + } + if e.Name != "" { + if errstr != "" { + errstr += " " + } + errstr += e.Name + } + if e.Err != nil { + if errstr != "" { + errstr += ": " + } + errstr += e.Err.Error() + } + return +} + +// Get retrieves extended attribute data associated with path. It will follow +// all symlinks along the path. +func Get(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return getxattr(path, name, data) + }) +} + +// LGet is like Get but does not follow a symlink at the end of the path. +func LGet(path, name string) ([]byte, error) { + return get(path, name, func(name string, data []byte) (int, error) { + return lgetxattr(path, name, data) + }) +} + +// FGet is like Get but accepts a os.File instead of a file path. +func FGet(f *os.File, name string) ([]byte, error) { + return get(f.Name(), name, func(name string, data []byte) (int, error) { + return fgetxattr(f, name, data) + }) +} + +type getxattrFunc func(name string, data []byte) (int, error) + +// get contains the buffer allocation logic used by both Get and LGet. +func get(path string, name string, getxattrFunc getxattrFunc) ([]byte, error) { + const ( + // Start with a 1 KB buffer for the xattr value + initialBufSize = 1024 + + // The theoretical maximum xattr value size on MacOS is 64 MB. On Linux it's + // much smaller: documented at 64 KB. However, at least on TrueNAS SCALE, a + // Debian-based Linux distro, it can be larger. + maxBufSize = 64 * 1024 * 1024 + + // Function name as reported in error messages + myname = "xattr.get" + ) + + size := initialBufSize + for { + data := make([]byte, size) + read, err := getxattrFunc(name, data) + + // If the buffer was too small to fit the value, Linux and MacOS react + // differently: + // Linux: returns an ERANGE error and "-1" bytes. However, the TrueNAS + // SCALE distro sometimes returns E2BIG. + // MacOS: truncates the value and returns "size" bytes. If the value + // happens to be exactly as big as the buffer, we cannot know if it was + // truncated, and we retry with a bigger buffer. Contrary to documentation, + // MacOS never seems to return ERANGE! + // To keep the code simple, we always check both conditions, and sometimes + // double the buffer size without it being strictly necessary. + if err == syscall.ERANGE || err == syscall.E2BIG || read == size { + // The buffer was too small. Try again. + size <<= 1 + if size >= maxBufSize { + return nil, &Error{myname, path, name, syscall.EOVERFLOW} + } + continue + } + if err != nil { + return nil, &Error{myname, path, name, err} + } + return data[:read], nil + } +} + +// Set associates name and data together as an attribute of path. 
+func Set(path, name string, data []byte) error { + if err := setxattr(path, name, data, 0); err != nil { + return &Error{"xattr.Set", path, name, err} + } + return nil +} + +// LSet is like Set but does not follow a symlink at +// the end of the path. +func LSet(path, name string, data []byte) error { + if err := lsetxattr(path, name, data, 0); err != nil { + return &Error{"xattr.LSet", path, name, err} + } + return nil +} + +// FSet is like Set but accepts a os.File instead of a file path. +func FSet(f *os.File, name string, data []byte) error { + if err := fsetxattr(f, name, data, 0); err != nil { + return &Error{"xattr.FSet", f.Name(), name, err} + } + return nil +} + +// SetWithFlags associates name and data together as an attribute of path. +// Forwards the flags parameter to the syscall layer. +func SetWithFlags(path, name string, data []byte, flags int) error { + if err := setxattr(path, name, data, flags); err != nil { + return &Error{"xattr.SetWithFlags", path, name, err} + } + return nil +} + +// LSetWithFlags is like SetWithFlags but does not follow a symlink at +// the end of the path. +func LSetWithFlags(path, name string, data []byte, flags int) error { + if err := lsetxattr(path, name, data, flags); err != nil { + return &Error{"xattr.LSetWithFlags", path, name, err} + } + return nil +} + +// FSetWithFlags is like SetWithFlags but accepts a os.File instead of a file path. +func FSetWithFlags(f *os.File, name string, data []byte, flags int) error { + if err := fsetxattr(f, name, data, flags); err != nil { + return &Error{"xattr.FSetWithFlags", f.Name(), name, err} + } + return nil +} + +// Remove removes the attribute associated with the given path. +func Remove(path, name string) error { + if err := removexattr(path, name); err != nil { + return &Error{"xattr.Remove", path, name, err} + } + return nil +} + +// LRemove is like Remove but does not follow a symlink at the end of the +// path. +func LRemove(path, name string) error { + if err := lremovexattr(path, name); err != nil { + return &Error{"xattr.LRemove", path, name, err} + } + return nil +} + +// FRemove is like Remove but accepts a os.File instead of a file path. +func FRemove(f *os.File, name string) error { + if err := fremovexattr(f, name); err != nil { + return &Error{"xattr.FRemove", f.Name(), name, err} + } + return nil +} + +// List retrieves a list of names of extended attributes associated +// with the given path in the file system. +func List(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return listxattr(path, data) + }) +} + +// LList is like List but does not follow a symlink at the end of the +// path. +func LList(path string) ([]string, error) { + return list(path, func(data []byte) (int, error) { + return llistxattr(path, data) + }) +} + +// FList is like List but accepts a os.File instead of a file path. +func FList(f *os.File) ([]string, error) { + return list(f.Name(), func(data []byte) (int, error) { + return flistxattr(f, data) + }) +} + +type listxattrFunc func(data []byte) (int, error) + +// list contains the buffer allocation logic used by both List and LList. +func list(path string, listxattrFunc listxattrFunc) ([]string, error) { + myname := "xattr.list" + // find size. + size, err := listxattrFunc(nil) + if err != nil { + return nil, &Error{myname, path, "", err} + } + if size > 0 { + // `size + 1` because of ERANGE error when reading + // from a SMB1 mount point (https://github.com/pkg/xattr/issues/16). 
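+		// Only buf[:read] is parsed below, so the extra byte never reaches
+		// stringsFromByteSlice.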
+ buf := make([]byte, size+1) + // Read into buffer of that size. + read, err := listxattrFunc(buf) + if err != nil { + return nil, &Error{myname, path, "", err} + } + return stringsFromByteSlice(buf[:read]), nil + } + return []string{}, nil +} + +// bytePtrFromSlice returns a pointer to array of bytes and a size. +func bytePtrFromSlice(data []byte) (ptr *byte, size int) { + size = len(data) + if size > 0 { + ptr = &data[0] + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_bsd.go b/vendor/github.com/pkg/xattr/xattr_bsd.go new file mode 100644 index 0000000000000..f4a3f95390490 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_bsd.go @@ -0,0 +1,201 @@ +//go:build freebsd || netbsd +// +build freebsd netbsd + +package xattr + +import ( + "os" + "syscall" + "unsafe" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + EXTATTR_NAMESPACE_USER = 1 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_FILE, path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return sysGet(syscall.SYS_EXTATTR_GET_LINK, path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +// sysGet is called by getxattr and lgetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysGet(syscallNum uintptr, path string, name string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_get_file( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + + ssize_t extattr_get_link( + const char *path, + int attrnamespace, + const char *attrname, + void *data, + size_t nbytes); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_FILE, path, name, data) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return sysSet(syscall.SYS_EXTATTR_SET_LINK, path, name, data) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +// sysSet is called by setxattr and lsetxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. 
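+// A short write (fewer bytes consumed than provided) is reported as E2BIG.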
+func sysSet(syscallNum uintptr, path string, name string, data []byte) error { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_set_file( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + + ssize_t extattr_set_link( + const char *path, + int attrnamespace, + const char *attrname, + const void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0) + if err != syscall.Errno(0) { + return err + } + if int(r0) != nbytes { + return syscall.E2BIG + } + return nil +} + +func removexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_FILE, path, name) +} + +func lremovexattr(path string, name string) error { + return sysRemove(syscall.SYS_EXTATTR_DELETE_LINK, path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +// sysSet is called by removexattr and lremovexattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysRemove(syscallNum uintptr, path string, name string) error { + /* + int extattr_delete_file( + const char *path, + int attrnamespace, + const char *attrname + ); + + int extattr_delete_link( + const char *path, + int attrnamespace, + const char *attrname + ); + */ + _, _, err := syscall.Syscall(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(syscall.StringBytePtr(name))), + ) + if err != syscall.Errno(0) { + return err + } + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_FILE, path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return sysList(syscall.SYS_EXTATTR_LIST_LINK, path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// sysSet is called by listxattr and llistxattr with the appropriate syscall +// number. This works because syscalls have the same signature and return +// values. +func sysList(syscallNum uintptr, path string, data []byte) (int, error) { + ptr, nbytes := bytePtrFromSlice(data) + /* + ssize_t extattr_list_file( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + + ssize_t extattr_list_link( + const char *path, + int attrnamespace, + void *data, + size_t nbytes + ); + */ + r0, _, err := syscall.Syscall6(syscallNum, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), + EXTATTR_NAMESPACE_USER, uintptr(unsafe.Pointer(ptr)), uintptr(nbytes), 0, 0) + if err != syscall.Errno(0) { + return int(r0), err + } + return int(r0), nil +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On FreeBSD, each entry consists of a single byte containing the length +// of the attribute name, followed by the attribute name. +// The name is _not_ terminated by NULL. 
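+//
+// For example, the buffer "\x03foo\x05hello" decodes to ["foo", "hello"].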
+func stringsFromByteSlice(buf []byte) (result []string) { + index := 0 + for index < len(buf) { + next := index + 1 + int(buf[index]) + result = append(result, string(buf[index+1:next])) + index = next + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_darwin.go b/vendor/github.com/pkg/xattr/xattr_darwin.go new file mode 100644 index 0000000000000..ee7a501dae5cb --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_darwin.go @@ -0,0 +1,90 @@ +//go:build darwin +// +build darwin + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +// See https://opensource.apple.com/source/xnu/xnu-1504.15.3/bsd/sys/xattr.h.auto.html +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_NOFOLLOW = 0x0001 + XATTR_CREATE = 0x0002 + XATTR_REPLACE = 0x0004 + XATTR_NOSECURITY = 0x0008 + XATTR_NODEFAULT = 0x0010 + XATTR_SHOWCOMPRESSION = 0x0020 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENOATTR +) + +func getxattr(path string, name string, data []byte) (int, error) { + return unix.Getxattr(path, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return unix.Lgetxattr(path, name, data) +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return getxattr(f.Name(), name, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + return unix.Setxattr(path, name, data, flags) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.Lsetxattr(path, name, data, flags) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return setxattr(f.Name(), name, data, flags) +} + +func removexattr(path string, name string) error { + return unix.Removexattr(path, name) +} + +func lremovexattr(path string, name string) error { + return unix.Lremovexattr(path, name) +} + +func fremovexattr(f *os.File, name string) error { + return removexattr(f.Name(), name) +} + +func listxattr(path string, data []byte) (int, error) { + return unix.Listxattr(path, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return unix.Llistxattr(path, data) +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return listxattr(f.Name(), data) +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_linux.go b/vendor/github.com/pkg/xattr/xattr_linux.go new file mode 100644 index 0000000000000..879085ee5d453 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_linux.go @@ -0,0 +1,142 @@ +//go:build linux +// +build linux + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = unix.XATTR_CREATE + XATTR_REPLACE = unix.XATTR_REPLACE + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. 
+ ENOATTR = syscall.ENODATA +) + +// On Linux, FUSE and CIFS filesystems can return EINTR for interrupted system +// calls. This function works around this by retrying system calls until they +// stop returning EINTR. +// +// See https://github.com/golang/go/commit/6b420169d798c7ebe733487b56ea5c3fa4aab5ce. +func ignoringEINTR(fn func() error) (err error) { + for { + err = fn() + if err != unix.EINTR { + break + } + } + return err +} + +func getxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Getxattr(path, name, data) + return err + }) + return r, err +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Lgetxattr(path, name, data) + return err + }) + return r, err +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Fgetxattr(int(f.Fd()), name, data) + return err + }) + return r, err +} + +func setxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Setxattr(path, name, data, flags) + }) +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Lsetxattr(path, name, data, flags) + }) +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return ignoringEINTR(func() (err error) { + return unix.Fsetxattr(int(f.Fd()), name, data, flags) + }) +} + +func removexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Removexattr(path, name) + }) +} + +func lremovexattr(path string, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Lremovexattr(path, name) + }) +} + +func fremovexattr(f *os.File, name string) error { + return ignoringEINTR(func() (err error) { + return unix.Fremovexattr(int(f.Fd()), name) + }) +} + +func listxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Listxattr(path, data) + return err + }) + return r, err +} + +func llistxattr(path string, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Llistxattr(path, data) + return err + }) + return r, err +} + +func flistxattr(f *os.File, data []byte) (int, error) { + var r int + err := ignoringEINTR(func() (err error) { + r, err = unix.Flistxattr(int(f.Fd()), data) + return err + }) + return r, err +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// On Darwin and Linux, each entry is a NULL-terminated string. 
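+//
+// For example, the buffer "user.foo\x00user.bar\x00" decodes to ["user.foo", "user.bar"].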
+func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_solaris.go b/vendor/github.com/pkg/xattr/xattr_solaris.go new file mode 100644 index 0000000000000..7c98b4afbac25 --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_solaris.go @@ -0,0 +1,175 @@ +//go:build solaris +// +build solaris + +package xattr + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +const ( + // XATTR_SUPPORTED will be true if the current platform is supported + XATTR_SUPPORTED = true + + XATTR_CREATE = 0x1 + XATTR_REPLACE = 0x2 + + // ENOATTR is not exported by the syscall package on Linux, because it is + // an alias for ENODATA. We export it here so it is available on all + // our supported platforms. + ENOATTR = syscall.ENODATA +) + +func getxattr(path string, name string, data []byte) (int, error) { + f, err := openNonblock(path) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return fgetxattr(f, name, data) +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), name, unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Read(fd, data) +} + +func setxattr(path string, name string, data []byte, flags int) error { + f, err := openNonblock(path) + if err != nil { + return err + } + err = fsetxattr(f, name, data, flags) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return unix.ENOTSUP +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + mode := unix.O_WRONLY | unix.O_XATTR + if flags&XATTR_REPLACE != 0 { + mode |= unix.O_TRUNC + } else if flags&XATTR_CREATE != 0 { + mode |= unix.O_CREAT | unix.O_EXCL + } else { + mode |= unix.O_CREAT | unix.O_TRUNC + } + fd, err := unix.Openat(int(f.Fd()), name, mode, 0666) + if err != nil { + return err + } + if _, err = unix.Write(fd, data); err != nil { + _ = unix.Close(fd) + return err + } + return unix.Close(fd) +} + +func removexattr(path string, name string) error { + mode := unix.O_RDONLY | unix.O_XATTR | unix.O_NONBLOCK | unix.O_CLOEXEC + fd, err := unix.Open(path, mode, 0) + if err != nil { + return err + } + f := os.NewFile(uintptr(fd), path) + defer func() { + _ = f.Close() + }() + return fremovexattr(f, name) +} + +func lremovexattr(path string, name string) error { + return unix.ENOTSUP +} + +func fremovexattr(f *os.File, name string) error { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_XATTR, 0) + if err != nil { + return err + } + defer func() { + _ = unix.Close(fd) + }() + return unix.Unlinkat(fd, name, 0) +} + +func listxattr(path string, data []byte) (int, error) { + f, err := openNonblock(path) + if err != nil { + return 0, err + } + defer func() { + _ = f.Close() + }() + return flistxattr(f, data) +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, unix.ENOTSUP +} + +func flistxattr(f *os.File, data []byte) (int, error) { + fd, err := unix.Openat(int(f.Fd()), ".", unix.O_RDONLY|unix.O_XATTR, 0) + if err != nil { + return 0, unix.ENOTSUP + } + xf := os.NewFile(uintptr(fd), f.Name()) + defer func() { + _ = xf.Close() + }() + names, err 
:= xf.Readdirnames(-1) + if err != nil { + return 0, err + } + var buf []byte + for _, name := range names { + buf = append(buf, append([]byte(name), '\000')...) + } + if data == nil { + return len(buf), nil + } + return copy(data, buf), nil +} + +// Like os.Open, but passes O_NONBLOCK to the open(2) syscall. +func openNonblock(path string) (*os.File, error) { + fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC|unix.O_NONBLOCK, 0) + if err != nil { + return nil, err + } + return os.NewFile(uintptr(fd), path), err +} + +// stringsFromByteSlice converts a sequence of attributes to a []string. +// We simulate Linux/Darwin, where each entry is a NULL-terminated string. +func stringsFromByteSlice(buf []byte) (result []string) { + offset := 0 + for index, b := range buf { + if b == 0 { + result = append(result, string(buf[offset:index])) + offset = index + 1 + } + } + return +} diff --git a/vendor/github.com/pkg/xattr/xattr_unsupported.go b/vendor/github.com/pkg/xattr/xattr_unsupported.go new file mode 100644 index 0000000000000..8886fbdc4216e --- /dev/null +++ b/vendor/github.com/pkg/xattr/xattr_unsupported.go @@ -0,0 +1,70 @@ +//go:build !linux && !freebsd && !netbsd && !darwin && !solaris +// +build !linux,!freebsd,!netbsd,!darwin,!solaris + +package xattr + +import ( + "os" + "syscall" +) + +const ( + // We need to use the default for non supported operating systems + ENOATTR = syscall.Errno(0x59) +) + +// XATTR_SUPPORTED will be true if the current platform is supported +const XATTR_SUPPORTED = false + +func getxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func lgetxattr(path string, name string, data []byte) (int, error) { + return 0, nil +} + +func fgetxattr(f *os.File, name string, data []byte) (int, error) { + return 0, nil +} + +func setxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func lsetxattr(path string, name string, data []byte, flags int) error { + return nil +} + +func fsetxattr(f *os.File, name string, data []byte, flags int) error { + return nil +} + +func removexattr(path string, name string) error { + return nil +} + +func lremovexattr(path string, name string) error { + return nil +} + +func fremovexattr(f *os.File, name string) error { + return nil +} + +func listxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func llistxattr(path string, data []byte) (int, error) { + return 0, nil +} + +func flistxattr(f *os.File, data []byte) (int, error) { + return 0, nil +} + +// dummy +func stringsFromByteSlice(buf []byte) (result []string) { + return []string{} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index ab091cf6ade33..18436eaedffd8 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -42,6 +42,8 @@ type config struct { TracerProvider trace.TracerProvider MeterProvider metric.MeterProvider SpanStartOptions []trace.SpanStartOption + SpanAttributes []attribute.KeyValue + MetricAttributes []attribute.KeyValue ReceivedEvent bool SentEvent bool @@ -257,3 +259,29 @@ func (o spanStartOption) apply(c *config) { func WithSpanOptions(opts ...trace.SpanStartOption) Option { return spanStartOption{opts} } + +type spanAttributesOption struct{ a []attribute.KeyValue } + +func (o 
spanAttributesOption) apply(c *config) { + if o.a != nil { + c.SpanAttributes = o.a + } +} + +// WithSpanAttributes returns an Option to add custom attributes to the spans. +func WithSpanAttributes(a ...attribute.KeyValue) Option { + return spanAttributesOption{a: a} +} + +type metricAttributesOption struct{ a []attribute.KeyValue } + +func (o metricAttributesOption) apply(c *config) { + if o.a != nil { + c.MetricAttributes = o.a + } +} + +// WithMetricAttributes returns an Option to add custom attributes to the metrics. +func WithMetricAttributes(a ...attribute.KeyValue) Option { + return metricAttributesOption{a: a} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index 201867a86944a..fbcbfb84e047e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -62,11 +62,11 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { @@ -102,11 +102,11 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont ctx, name, trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(attrs...), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), ) gctx := gRPCContext{ - metricAttrs: attrs, + metricAttrs: append(attrs, h.config.MetricAttributes...), record: true, } if h.config.Filter != nil { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index a15d06cb0c451..04f425edfefea 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.53.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 214acaf581ef1..5d6e6156b7beb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Server HTTP metrics. 
-const ( - serverRequestSize = "http.server.request.size" // Incoming request bytes total - serverResponseSize = "http.server.response.size" // Incoming response bytes total - serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds -) - // Client HTTP metrics. const ( clientRequestSize = "http.client.request.size" // Outgoing request bytes total diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go index f0a9bb9efeb5c..a01bfafbe0773 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -8,6 +8,8 @@ import ( "net/http" "net/http/httptrace" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" @@ -33,8 +35,9 @@ type config struct { SpanNameFormatter func(string, *http.Request) string ClientTrace func(context.Context) *httptrace.ClientTrace - TracerProvider trace.TracerProvider - MeterProvider metric.MeterProvider + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + MetricAttributesFn func(*http.Request) []attribute.KeyValue } // Option interface used for setting optional config properties. @@ -194,3 +197,11 @@ func WithServerName(server string) Option { c.ServerName = server }) } + +// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue. +// These attributes will be included in metrics for every request. +func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option { + return optionFunc(func(c *config) { + c.MetricAttributesFn = metricAttributesFn + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index d01bdccf40dc5..33580a35b774b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -9,11 +9,9 @@ import ( "github.com/felixge/httpsnoop" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -24,7 +22,6 @@ type middleware struct { server string tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption readEvent bool @@ -34,10 +31,7 @@ type middleware struct { publicEndpoint bool publicEndpointFn func(*http.Request) bool - traceSemconv semconv.HTTPServer - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - serverLatencyMeasure metric.Float64Histogram + semconv semconv.HTTPServer } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler { h := middleware{ operation: operation, - - traceSemconv: 
semconv.NewHTTPServer(), } defaultOpts := []Option{ @@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han c := newConfig(append(defaultOpts, opts...)...) h.configure(c) - h.createMeasures() return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han func (h *middleware) configure(c *config) { h.tracer = c.Tracer - h.meter = c.Meter h.propagators = c.Propagators h.spanStartOptions = c.SpanStartOptions h.readEvent = c.ReadEvent @@ -88,6 +78,7 @@ func (h *middleware) configure(c *config) { h.publicEndpoint = c.PublicEndpoint h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName + h.semconv = semconv.NewHTTPServer(c.Meter) } func handleErr(err error) { @@ -96,30 +87,6 @@ func handleErr(err error) { } } -func (h *middleware) createMeasures() { - var err error - h.requestBytesCounter, err = h.meter.Int64Counter( - serverRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - h.responseBytesCounter, err = h.meter.Int64Counter( - serverResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - h.serverLatencyMeasure, err = h.meter.Float64Histogram( - serverDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of inbound HTTP requests."), - ) - handleErr(err) -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -134,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header)) opts := []trace.SpanStartOption{ - trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...), + trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...), } opts = append(opts, h.spanStartOptions...) @@ -166,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. 
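+	// request.BodyWrapper counts the bytes read from the request body and records
+	// the last read error; both feed the span attributes and metrics below.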
+ bw := request.NewBodyWrapper(r.Body, readRecordFunc) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - bw.record = readRecordFunc - r.Body = &bw + r.Body = bw } writeRecordFunc := func(int64) {} @@ -183,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http } } - rww := &respWriterWrapper{ - ResponseWriter: w, - record: writeRecordFunc, - ctx: ctx, - props: h.propagators, - statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything - } + rww := request.NewRespWriterWrapper(w, writeRecordFunc) // Wrap w to use our ResponseWriter methods while also exposing // other interfaces that w may implement (http.CloseNotifier, @@ -217,35 +176,35 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http next.ServeHTTP(w, r.WithContext(ctx)) - span.SetStatus(semconv.ServerStatus(rww.statusCode)) - span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ - StatusCode: rww.statusCode, - ReadBytes: bw.read.Load(), - ReadError: bw.err, - WriteBytes: rww.written, - WriteError: rww.err, + statusCode := rww.StatusCode() + bytesWritten := rww.BytesWritten() + span.SetStatus(h.semconv.Status(statusCode)) + span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{ + StatusCode: statusCode, + ReadBytes: bw.BytesRead(), + ReadError: bw.Error(), + WriteBytes: bytesWritten, + WriteError: rww.Error(), })...) - // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) - if rww.statusCode > 0 { - attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) - h.responseBytesCounter.Add(ctx, rww.written, addOpts...) - // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.serverLatencyMeasure.Record(ctx, elapsedTime, o) + h.semconv.RecordMetrics(ctx, semconv.MetricData{ + ServerName: h.server, + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + RequestSize: bw.BytesRead(), + ResponseSize: bytesWritten, + ElapsedTime: elapsedTime, + }) } // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. 
func WithRouteTag(route string, h http.Handler) http.Handler { - attr := semconv.NewHTTPServer().Route(route) + attr := semconv.NewHTTPServer(nil).Route(route) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { span := trace.SpanFromContext(r.Context()) span.SetAttributes(attr) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go new file mode 100644 index 0000000000000..a945f55661658 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "io" + "sync" +) + +var _ io.ReadCloser = &BodyWrapper{} + +// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number +// of bytes read and the last error. +type BodyWrapper struct { + io.ReadCloser + OnRead func(n int64) // must not be nil + + mu sync.Mutex + read int64 + err error +} + +// NewBodyWrapper creates a new BodyWrapper. +// +// The onRead attribute is a callback that will be called every time the data +// is read, with the number of bytes being read. +func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper { + return &BodyWrapper{ + ReadCloser: body, + OnRead: onRead, + } +} + +// Read reads the data from the io.ReadCloser, and stores the number of bytes +// read and the error. +func (w *BodyWrapper) Read(b []byte) (int, error) { + n, err := w.ReadCloser.Read(b) + n1 := int64(n) + + w.updateReadData(n1, err) + w.OnRead(n1) + return n, err +} + +func (w *BodyWrapper) updateReadData(n int64, err error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.read += n + if err != nil { + w.err = err + } +} + +// Closes closes the io.ReadCloser. +func (w *BodyWrapper) Close() error { + return w.ReadCloser.Close() +} + +// BytesRead returns the number of bytes read up to this point. +func (w *BodyWrapper) BytesRead() int64 { + w.mu.Lock() + defer w.mu.Unlock() + + return w.read +} + +// Error returns the last error. +func (w *BodyWrapper) Error() error { + w.mu.Lock() + defer w.mu.Unlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go new file mode 100644 index 0000000000000..aea171fb260b5 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -0,0 +1,112 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +import ( + "net/http" + "sync" +) + +var _ http.ResponseWriter = &RespWriterWrapper{} + +// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of +// bytes written, the last error, and to catch the first written statusCode. +// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional +// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc) +// that may be useful when using it in real life situations. 
+type RespWriterWrapper struct { + http.ResponseWriter + OnWrite func(n int64) // must not be nil + + mu sync.RWMutex + written int64 + statusCode int + err error + wroteHeader bool +} + +// NewRespWriterWrapper creates a new RespWriterWrapper. +// +// The onWrite attribute is a callback that will be called every time the data +// is written, with the number of bytes that were written. +func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper { + return &RespWriterWrapper{ + ResponseWriter: w, + OnWrite: onWrite, + statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything + } +} + +// Write writes the bytes array into the [ResponseWriter], and tracks the +// number of bytes written and last error. +func (w *RespWriterWrapper) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(http.StatusOK) + + n, err := w.ResponseWriter.Write(p) + n1 := int64(n) + w.OnWrite(n1) + w.written += n1 + w.err = err + return n, err +} + +// WriteHeader persists initial statusCode for span attribution. +// All calls to WriteHeader will be propagated to the underlying ResponseWriter +// and will persist the statusCode from the first call. +// Blocking consecutive calls to WriteHeader alters expected behavior and will +// remove warning logs from net/http where developers will notice incorrect handler implementations. +func (w *RespWriterWrapper) WriteHeader(statusCode int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.writeHeader(statusCode) +} + +// writeHeader persists the status code for span attribution, and propagates +// the call to the underlying ResponseWriter. +// It does not acquire a lock, and therefore assumes that is being handled by a +// parent method. +func (w *RespWriterWrapper) writeHeader(statusCode int) { + if !w.wroteHeader { + w.wroteHeader = true + w.statusCode = statusCode + } + w.ResponseWriter.WriteHeader(statusCode) +} + +// Flush implements [http.Flusher]. +func (w *RespWriterWrapper) Flush() { + w.WriteHeader(http.StatusOK) + + if f, ok := w.ResponseWriter.(http.Flusher); ok { + f.Flush() + } +} + +// BytesWritten returns the number of bytes written. +func (w *RespWriterWrapper) BytesWritten() int64 { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.written +} + +// BytesWritten returns the HTTP status code that was sent. +func (w *RespWriterWrapper) StatusCode() int { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.statusCode +} + +// Error returns the last error. 
+func (w *RespWriterWrapper) Error() error { + w.mu.RLock() + defer w.mu.RUnlock() + + return w.err +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 3ec0ad00c81f8..9cae4cab86af1 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -4,6 +4,7 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "context" "fmt" "net/http" "os" @@ -11,6 +12,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/metric" ) type ResponseTelemetry struct { @@ -23,6 +25,11 @@ type ResponseTelemetry struct { type HTTPServer struct { duplicate bool + + // Old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue { return oldHTTPServer{}.Route(route) } -func NewHTTPServer() HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE")) - return HTTPServer{duplicate: env == "http/dup"} -} - -// ServerStatus returns a span status code and message for an HTTP status code +// Status returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. -func ServerStatus(code int) (codes.Code, string) { +func (s HTTPServer) Status(code int) (codes.Code, string) { if code < 100 || code >= 600 { return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) } @@ -80,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) { } return codes.Unset, "" } + +type MetricData struct { + ServerName string + Req *http.Request + StatusCode int + AdditionalAttributes []attribute.KeyValue + + RequestSize int64 + ResponseSize int64 + ElapsedTime float64 +} + +func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { + if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { + // This will happen if an HTTPServer{} is used insted of NewHTTPServer. + return + } + + attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := []metric.AddOption{o} // Allocate vararg slice once. + s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) 
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + + // TODO: Duplicate Metrics +} + +func NewHTTPServer(meter metric.Meter) HTTPServer { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + duplicate := env == "http/dup" + server := HTTPServer{ + duplicate: duplicate, + } + server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter) + return server +} + +type HTTPClient struct { + duplicate bool +} + +func NewHTTPClient() HTTPClient { + env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + return HTTPClient{duplicate: env == "http/dup"} +} + +// RequestTraceAttrs returns attributes for an HTTP request made by a client. +func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...) + } + return oldHTTPClient{}.RequestTraceAttrs(req) +} + +// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client. +func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + if c.duplicate { + return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...) + } + + return oldHTTPClient{}.ResponseTraceAttrs(resp) +} + +func (c HTTPClient) Status(code int) (codes.Code, string) { + if code < 100 || code >= 600 { + return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code) + } + if code >= 400 { + return codes.Error, "" + } + return codes.Unset, "" +} + +func (c HTTPClient) ErrorType(err error) attribute.KeyValue { + if c.duplicate { + return newHTTPClient{}.ErrorType(err) + } + + return attribute.KeyValue{} +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go similarity index 57% rename from vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go rename to vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index 0c5d4c4608a28..745b8c67bc40c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.24.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -4,11 +4,14 @@ package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" import ( + "fmt" "net/http" + "reflect" + "strconv" "strings" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) type newHTTPServer struct{} @@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke func (n newHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } + +type newHTTPClient struct{} + +// RequestTraceAttrs returns trace attributes for an HTTP request made by a client. +func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + /* + below attributes are returned: + - http.request.method + - http.request.method.original + - url.full + - server.address + - server.port + - network.protocol.name + - network.protocol.version + */ + numOfAttributes := 3 // URL, server address, proto, and method. 
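+	// The checks below bump the count for attributes that are only sometimes
+	// present before the slice is allocated.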
+ + var urlHost string + if req.URL != nil { + urlHost = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{urlHost, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if eligiblePort > 0 { + numOfAttributes++ + } + useragent := req.UserAgent() + if useragent != "" { + numOfAttributes++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + numOfAttributes++ + } + if protoVersion != "" { + numOfAttributes++ + } + + method, originalMethod := n.method(req.Method) + if originalMethod != (attribute.KeyValue{}) { + numOfAttributes++ + } + + attrs := make([]attribute.KeyValue, 0, numOfAttributes) + + attrs = append(attrs, method) + if originalMethod != (attribute.KeyValue{}) { + attrs = append(attrs, originalMethod) + } + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, semconvNew.URLFull(u)) + + attrs = append(attrs, semconvNew.ServerAddress(requestHost)) + if eligiblePort > 0 { + attrs = append(attrs, semconvNew.ServerPort(eligiblePort)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + return attrs +} + +// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client. +func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + /* + below attributes are returned: + - http.response.status_code + - error.type + */ + var count int + if resp.StatusCode > 0 { + count++ + } + + if isErrorStatusCode(resp.StatusCode) { + count++ + } + + attrs := make([]attribute.KeyValue, 0, count) + if resp.StatusCode > 0 { + attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode)) + } + + if isErrorStatusCode(resp.StatusCode) { + errorType := strconv.Itoa(resp.StatusCode) + attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType)) + } + return attrs +} + +func (n newHTTPClient) ErrorType(err error) attribute.KeyValue { + t := reflect.TypeOf(err) + var value string + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. 
+ value = t.String() + } else { + value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) + } + + if value == "" { + return semconvNew.ErrorTypeOther + } + + return semconvNew.ErrorTypeKey.String(value) +} + +func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) { + if method == "" { + return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} + } + if attr, ok := methodLookup[method]; ok { + return attr, attribute.KeyValue{} + } + + orig := semconvNew.HTTPRequestMethodOriginal(method) + if attr, ok := methodLookup[strings.ToUpper(method)]; ok { + return attr, orig + } + return semconvNew.HTTPRequestMethodGet, orig +} + +func isErrorStatusCode(code int) bool { + return code >= 400 || code < 100 +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index e7f293761bd02..e6e14924f5790 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -9,8 +9,9 @@ import ( "strconv" "strings" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0" + semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) // splitHostPort splits a network address hostport of the form "host", @@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Byte size checked 16 above. } func requiredHTTPPort(https bool, port int) int { // nolint:revive @@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{ http.MethodPut: semconvNew.HTTPRequestMethodPut, http.MethodTrace: semconvNew.HTTPRequestMethodTrace, } + +func handleErr(err error) { + if err != nil { + otel.Handle(err) + } +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c3e838aaa5422..c999b05e675b2 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -7,9 +7,13 @@ import ( "errors" "io" "net/http" + "slices" + "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) @@ -72,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue { func HTTPStatusCode(status int) attribute.KeyValue { return semconv.HTTPStatusCode(status) } + +// Server HTTP metrics. 
+const ( + serverRequestSize = "http.server.request.size" // Incoming request bytes total + serverResponseSize = "http.server.response.size" // Incoming response bytes total + serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds +) + +func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + var err error + requestBytesCounter, err := meter.Int64Counter( + serverRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + serverResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + serverLatencyMeasure, err := meter.Float64Histogram( + serverDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of inbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, serverLatencyMeasure +} + +func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + n := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + o.methodMetric(req.Method), + o.scheme(req.TLS != nil), + semconv.NetHostName(host)) + + if hostPort > 0 { + attributes = append(attributes, semconv.NetHostPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconv.NetProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconv.NetProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} + +func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconv.HTTPSchemeHTTPS + } + return semconv.HTTPSchemeHTTP +} + +type oldHTTPClient struct{} + +func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue { + return semconvutil.HTTPClientRequest(req) +} + +func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { + return semconvutil.HTTPClientResponse(resp) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index a9a9226b39afa..b80a1db61fa0f 100644 --- 
a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) { if err != nil { return } - return host, int(p) + return host, int(p) // nolint: gosec // Bitsize checked to be 16 above. } func netProtocol(proto string) (name string, version string) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 0d3cb2e4aa4fc..b4119d3438b7d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -11,13 +11,15 @@ import ( "sync/atomic" "time" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.20.0" + "go.opentelemetry.io/otel/trace" ) @@ -26,14 +28,16 @@ import ( type Transport struct { rt http.RoundTripper - tracer trace.Tracer - meter metric.Meter - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - filters []Filter - spanNameFormatter func(string, *http.Request) string - clientTrace func(context.Context) *httptrace.ClientTrace + tracer trace.Tracer + meter metric.Meter + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + filters []Filter + spanNameFormatter func(string, *http.Request) string + clientTrace func(context.Context) *httptrace.ClientTrace + metricAttributesFn func(*http.Request) []attribute.KeyValue + semconv semconv.HTTPClient requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram @@ -53,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, + rt: base, + semconv: semconv.NewHTTPClient(), } defaultOpts := []Option{ @@ -76,6 +81,7 @@ func (t *Transport) applyConfig(c *config) { t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.metricAttributesFn = c.MetricAttributesFn } func (t *Transport) createMeasures() { @@ -143,45 +149,49 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request. - // use a body wrapper to determine the request size - var bw bodyWrapper // if request body is nil or NoBody, we don't want to mutate the body as it // will affect the identity of it in an unforeseeable way because we assert // ReadCloser fulfills a certain interface and it is indeed nil or NoBody. + bw := request.NewBodyWrapper(r.Body, func(int64) {}) if r.Body != nil && r.Body != http.NoBody { - bw.ReadCloser = r.Body - // noop to prevent nil panic. not using this record fun yet. - bw.record = func(int64) {} - r.Body = &bw + r.Body = bw } - span.SetAttributes(semconvutil.HTTPClientRequest(r)...) + span.SetAttributes(t.semconv.RequestTraceAttrs(r)...) 
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header)) res, err := t.rt.RoundTrip(r) if err != nil { - span.RecordError(err) + // set error type attribute if the error is part of the predefined + // error types. + // otherwise, record it as an exception + if errType := t.semconv.ErrorType(err); errType.Valid() { + span.SetAttributes(errType) + } else { + span.RecordError(err) + } + span.SetStatus(codes.Error, err.Error()) span.End() return res, err } // metrics - metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...) + metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) if res.StatusCode > 0 { metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) } o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. - t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...) + + t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, addOpts...) + t.responseBytesCounter.Add(ctx, n, o) } // traces - span.SetAttributes(semconvutil.HTTPClientResponse(res)...) - span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode)) + span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...) + span.SetStatus(t.semconv.Status(res.StatusCode)) res.Body = newWrappedBody(span, readRecordFunc, res.Body) @@ -193,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { return res, err } +func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if t.metricAttributesFn != nil { + attributeForRequest = t.metricAttributesFn(r) + } + return attributeForRequest +} + // newWrappedBody returns a new and appropriately scoped *wrappedBody as an // io.ReadCloser. If the passed body implements io.Writer, the returned value // will implement io.ReadWriteCloser. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index b0957f28cead6..502c1bdafc791 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.53.0" + return "0.54.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go deleted file mode 100644 index 948f8406c09d2..0000000000000 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" - -import ( - "context" - "io" - "net/http" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" -) - -var _ io.ReadCloser = &bodyWrapper{} - -// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number -// of bytes read and the last error. -type bodyWrapper struct { - io.ReadCloser - record func(n int64) // must not be nil - - read atomic.Int64 - err error -} - -func (w *bodyWrapper) Read(b []byte) (int, error) { - n, err := w.ReadCloser.Read(b) - n1 := int64(n) - w.read.Add(n1) - w.err = err - w.record(n1) - return n, err -} - -func (w *bodyWrapper) Close() error { - return w.ReadCloser.Close() -} - -var _ http.ResponseWriter = &respWriterWrapper{} - -// respWriterWrapper wraps a http.ResponseWriter in order to track the number of -// bytes written, the last error, and to catch the first written statusCode. -// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional -// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc) -// that may be useful when using it in real life situations. -type respWriterWrapper struct { - http.ResponseWriter - record func(n int64) // must not be nil - - // used to inject the header - ctx context.Context - - props propagation.TextMapPropagator - - written int64 - statusCode int - err error - wroteHeader bool -} - -func (w *respWriterWrapper) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *respWriterWrapper) Write(p []byte) (int, error) { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - n, err := w.ResponseWriter.Write(p) - n1 := int64(n) - w.record(n1) - w.written += n1 - w.err = err - return n, err -} - -// WriteHeader persists initial statusCode for span attribution. -// All calls to WriteHeader will be propagated to the underlying ResponseWriter -// and will persist the statusCode from the first call. -// Blocking consecutive calls to WriteHeader alters expected behavior and will -// remove warning logs from net/http where developers will notice incorrect handler implementations. -func (w *respWriterWrapper) WriteHeader(statusCode int) { - if !w.wroteHeader { - w.wroteHeader = true - w.statusCode = statusCode - } - w.ResponseWriter.WriteHeader(statusCode) -} - -func (w *respWriterWrapper) Flush() { - if !w.wroteHeader { - w.WriteHeader(http.StatusOK) - } - - if f, ok := w.ResponseWriter.(http.Flusher); ok { - f.Flush() - } -} diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 6d9c8b64958b3..d9abe194d94b9 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,6 +9,8 @@ linters: disable-all: true # Specifically enable linters we want to use. 
enable: + - asasalint + - bodyclose - depguard - errcheck - errorlint diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index c01e6998e0b3c..6107c17b89fc9 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,64 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.29.0/0.51.0/0.5.0] 2024-08-23 + +This release is the last to support [Go 1.21]. +The next release will require at least [Go 1.22]. + +### Added + +- Add MacOS ARM64 platform to the compatibility testing suite. (#5577) +- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627) +- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. + This new module contains an OTLP exporter that transmits log telemetry using gRPC. + This module is unstable and breaking changes may be introduced. + See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629) +- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651) +- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651) +- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665) +- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`. + This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not. + It replaces the existing `Enabled` method that is removed from the `Processor` interface itself. + It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692) +- Support [Go 1.23]. (#5720) + +### Changed + +- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132) +- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636) +- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665) +- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666) +- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666) +- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method. + See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692) +- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) +- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693) + +### Fixed + +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. 
(#5584) +- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541) +- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612) +- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612) +- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612) +- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612) +- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641) +- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650) +- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) +- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705) + +### Removed + +- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) +- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692) + ## [1.28.0/0.50.0/0.4.0] 2024-07-02 ### Added @@ -49,6 +107,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix stale timestamps reported by the last-value aggregation. (#5517) - Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521) - Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549) +- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528) ## [1.27.0/0.49.0/0.3.0] 2024-05-21 @@ -175,7 +234,7 @@ The next release will require at least [Go 1.21]. This module includes OpenTelemetry Go's implementation of the Logs Bridge API. This module is in an alpha state, it is subject to breaking changes. See our [versioning policy](./VERSIONING.md) for more info. (#4961) -- ARM64 platform to the compatibility testing suite. (#4994) +- Add ARM64 platform to the compatibility testing suite. (#4994) ### Fixed @@ -3003,7 +3062,8 @@ It contains api and sdk for trace and meter. 
- CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.29.0...HEAD +[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 [1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0 [1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0 @@ -3086,6 +3146,9 @@ It contains api and sdk for trace and meter. [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 + + +[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 [Go 1.20]: https://go.dev/doc/go1.20 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 2025549332305..5904bb7070eb7 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -5,7 +5,7 @@ ##################################################### # # Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md +# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md # # # Learn about CODEOWNERS file format: diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index b86572f58ea5a..b7402576f9832 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -650,7 +650,7 @@ should be canceled. ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). +repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md). [Approver]: #approvers [Maintainer]: #maintainers diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index f33619f76a2b9..070b1e57df10a 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -178,17 +178,14 @@ test-coverage: $(GOCOVMERGE) done; \ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt -# Adding a directory will include all benchmarks in that directory if a filter is not specified. -BENCHMARK_TARGETS := sdk/trace .PHONY: benchmark -benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) -BENCHMARK_FILTER = . -# You can override the filter for a particular directory by adding a rule here. -benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample +benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%) benchmark/%: - @echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \ + @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \ && cd $* \ - $(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter)) + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=. 
.PHONY: golangci-lint golangci-lint-fix golangci-lint-fix: ARGS=--fix diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 5a8909317312c..657df34710364 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -47,20 +47,29 @@ stop ensuring compatibility with these versions in the following manner: Currently, this project supports the following environments. -| OS | Go Version | Architecture | -|---------|------------|--------------| -| Ubuntu | 1.22 | amd64 | -| Ubuntu | 1.21 | amd64 | -| Ubuntu | 1.22 | 386 | -| Ubuntu | 1.21 | 386 | -| Linux | 1.22 | arm64 | -| Linux | 1.21 | arm64 | -| MacOS | 1.22 | amd64 | -| MacOS | 1.21 | amd64 | -| Windows | 1.22 | amd64 | -| Windows | 1.21 | amd64 | -| Windows | 1.22 | 386 | -| Windows | 1.21 | 386 | +| OS | Go Version | Architecture | +|----------|------------|--------------| +| Ubuntu | 1.23 | amd64 | +| Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.21 | amd64 | +| Ubuntu | 1.23 | 386 | +| Ubuntu | 1.22 | 386 | +| Ubuntu | 1.21 | 386 | +| Linux | 1.23 | arm64 | +| Linux | 1.22 | arm64 | +| Linux | 1.21 | arm64 | +| macOS 13 | 1.23 | amd64 | +| macOS 13 | 1.22 | amd64 | +| macOS 13 | 1.21 | amd64 | +| macOS | 1.23 | arm64 | +| macOS | 1.22 | arm64 | +| macOS | 1.21 | arm64 | +| Windows | 1.23 | amd64 | +| Windows | 1.22 | amd64 | +| Windows | 1.21 | amd64 | +| Windows | 1.23 | 386 | +| Windows | 1.22 | 386 | +| Windows | 1.21 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 940f57f3d87d1..59992984d427d 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -69,6 +69,7 @@ Update go.mod for submodules to depend on the new release which will happen in t ``` - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). + - Make sure the new section is under the comment for released section, like ``, so it is protected from being overwritten in the future. - Update all the appropriate links at the bottom. 4. Push the changes to upstream and create a Pull Request on GitHub. diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index c40c896cc6673..b3569e95e5cad 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -44,9 +44,15 @@ type Property struct { // NewKeyProperty returns a new Property for key. // +// The passed key must be valid, non-empty UTF-8 string. // If key is invalid, an error will be returned. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. 
func NewKeyProperty(key string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -62,6 +68,10 @@ func NewKeyProperty(key string) (Property, error) { // Notice: Consider using [NewKeyValuePropertyRaw] instead // that does not require percent-encoding of the value. func NewKeyValueProperty(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -74,11 +84,20 @@ func NewKeyValueProperty(key, value string) (Property, error) { // NewKeyValuePropertyRaw returns a new Property for key with value. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on Property key. +// For example, the W3C Baggage specification restricts the Property keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as Property key. func NewKeyValuePropertyRaw(key, value string) (Property, error) { - if !validateKey(key) { + if !validateBaggageName(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } + if !validateBaggageValue(value) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } p := Property{ key: key, @@ -115,12 +134,15 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !validateKey(p.key) { + if !validateBaggageName(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } + if p.hasValue && !validateBaggageValue(p.value) { + return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) + } return nil } @@ -138,7 +160,15 @@ func (p Property) Value() (string, bool) { // String encodes Property into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (p Property) String() string { + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(p.key) { + return "" + } + if p.hasValue { return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } @@ -203,9 +233,14 @@ func (p properties) validate() error { // String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() + props := make([]string, 0, len(p)) + for _, prop := range p { + s := prop.String() + + // Ignored empty properties. + if s != "" { + props = append(props, s) + } } return strings.Join(props, propertyDelimiter) } @@ -230,6 +265,10 @@ type Member struct { // Notice: Consider using [NewMemberRaw] instead // that does not require percent-encoding of the value. 
func NewMember(key, value string, props ...Property) (Member, error) { + if !validateKey(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !validateValue(value) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) } @@ -242,7 +281,13 @@ func NewMember(key, value string, props ...Property) (Member, error) { // NewMemberRaw returns a new Member from the passed arguments. // -// The passed key must be compliant with W3C Baggage specification. +// The passed key must be valid, non-empty UTF-8 string. +// The passed value must be valid UTF-8 string. +// However, the specific Propagators that are used to transmit baggage entries across +// component boundaries may impose their own restrictions on baggage key. +// For example, the W3C Baggage specification restricts the baggage keys to strings that +// satisfy the token definition from RFC7230, Section 3.2.6. +// For maximum compatibility, alpha-numeric value are strongly recommended to be used as baggage key. func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, @@ -294,19 +339,45 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - val := strings.TrimSpace(v) - if !validateValue(val) { + rawVal := strings.TrimSpace(v) + if !validateValue(rawVal) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } // Decode a percent-encoded value. - value, err := url.PathUnescape(val) + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err) } + + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) return Member{key: key, value: value, properties: props, hasData: true}, nil } +// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'. +func replaceInvalidUTF8Sequences(cap int, unescapeVal string) string { + if utf8.ValidString(unescapeVal) { + return unescapeVal + } + // W3C baggage spec: + // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69 + + var b strings.Builder + b.Grow(cap) + for i := 0; i < len(unescapeVal); { + r, size := utf8.DecodeRuneInString(unescapeVal[i:]) + if r == utf8.RuneError && size == 1 { + // Invalid UTF-8 sequence found, replace it with '�' + _, _ = b.WriteString("�") + } else { + _, _ = b.WriteRune(r) + } + i += size + } + + return b.String() +} + // validate ensures m conforms to the W3C Baggage specification. // A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { @@ -314,9 +385,12 @@ func (m Member) validate() error { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !validateKey(m.key) { + if !validateBaggageName(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } + if !validateBaggageValue(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } return m.properties.validate() } @@ -331,10 +405,15 @@ func (m Member) Properties() []Property { return m.properties.Copy() } // String encodes Member into a header string compliant with the W3C Baggage // specification. +// It would return empty string if the key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (m Member) String() string { - // A key is just an ASCII string. 
A value is restricted to be - // US-ASCII characters excluding CTLs, whitespace, - // DQUOTE, comma, semicolon, and backslash. + // W3C Baggage specification does not allow percent-encoded keys. + if !validateKey(m.key) { + return "" + } + s := m.key + keyValueDelimiter + valueEscape(m.value) if len(m.properties) > 0 { s += propertyDelimiter + m.properties.String() @@ -448,7 +527,7 @@ func (b Baggage) Member(key string) Member { } // Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. +// The order of the returned list-members is not significant. // // The returned members are not validated, as we assume the validation happened // when they were added to the Baggage. @@ -469,8 +548,8 @@ func (b Baggage) Members() []Member { return members } -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is +// SetMember returns a copy of the Baggage with the member included. If the +// baggage contains a Member with the same key, the existing Member is // replaced. // // If member is invalid according to the W3C Baggage specification, an error @@ -528,14 +607,22 @@ func (b Baggage) Len() int { // String encodes Baggage into a header string compliant with the W3C Baggage // specification. +// It would ignore members where the member key is invalid with the W3C Baggage +// specification. This could happen for a UTF-8 key, as it may contain +// invalid characters. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { - members = append(members, Member{ + s := Member{ key: k, value: v.Value, properties: fromInternalProperties(v.Properties), - }.String()) + }.String() + + // Ignored empty members. + if s != "" { + members = append(members, s) + } } return strings.Join(members, listDelimiter) } @@ -607,10 +694,12 @@ func parsePropertyInternal(s string) (p Property, ok bool) { } // Decode a percent-encoded value. - value, err := url.PathUnescape(s[valueStart:valueEnd]) + rawVal := s[valueStart:valueEnd] + unescapeVal, err := url.PathUnescape(rawVal) if err != nil { return } + value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) ok = true p.key = s[keyStart:keyEnd] @@ -720,6 +809,24 @@ var safeKeyCharset = [utf8.RuneSelf]bool{ '~': true, } +// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. +// Baggage name is a valid, non-empty UTF-8 string. +func validateBaggageName(s string) bool { + if len(s) == 0 { + return false + } + + return utf8.ValidString(s) +} + +// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value. +// Baggage value is a valid UTF-8 strings. +// Empty string is also a valid UTF-8 string. +func validateBaggageValue(s string) bool { + return utf8.ValidString(s) +} + +// validateKey checks if the string is a valid W3C Baggage key. func validateKey(s string) bool { if len(s) == 0 { return false @@ -738,6 +845,7 @@ func validateKeyChar(c int32) bool { return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c] } +// validateValue checks if the string is a valid W3C Baggage value. 
func validateValue(s string) bool { for _, c := range s { if !validateValueChar(c) { diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index df29d96a6daea..2acbac3546651 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -83,7 +83,7 @@ func (c *Code) UnmarshalJSON(b []byte) error { return fmt.Errorf("invalid code: %q", ci) } - *c = Code(ci) + *c = Code(ci) // nolint: gosec // Bit size of 32 check above. return nil } return fmt.Errorf("invalid code: %q", string(b)) diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index 441c595014d35..921f85961ad41 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -17,6 +17,8 @@ To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about metrics, see go.opentelemetry.io/otel/metric. +To read more about logs, see go.opentelemetry.io/otel/log. + To read more about propagation, see go.opentelemetry.io/otel/propagation and go.opentelemetry.io/otel/baggage. */ diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 3e7bb3b3566cd..9b1da2c02b934 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -24,7 +24,8 @@ func Int64ToRaw(i int64) uint64 { } func RawToInt64(r uint64) int64 { - return int64(r) + // Assumes original was a valid int64 (overflow not checked). + return int64(r) // nolint: gosec } func Float64ToRaw(f float64) uint64 { @@ -36,9 +37,11 @@ func RawToFloat64(r uint64) float64 { } func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) + // Assumes original was a valid *float64 (overflow not checked). + return (*float64)(unsafe.Pointer(r)) // nolint: gosec } func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) + // Assumes original was a valid *int64 (overflow not checked). + return (*int64)(unsafe.Pointer(r)) // nolint: gosec } diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go index 6a7991e0151cc..14e08c24a4be6 100644 --- a/vendor/go.opentelemetry.io/otel/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -52,6 +52,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error) + // Int64UpDownCounter returns a new Int64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record int64 measurements during a computational @@ -61,6 +62,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error) + // Int64Histogram returns a new Int64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of int64 measurements during a @@ -70,6 +72,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error) + // Int64Gauge returns a new Int64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous int64 measurements during a computational operation. @@ -78,6 +81,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error) + // Int64ObservableCounter returns a new Int64ObservableCounter identified // by name and configured with options. The instrument is used to // asynchronously record increasing int64 measurements once per a @@ -92,6 +96,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error) + // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record int64 measurements once per @@ -106,6 +111,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) + // Int64ObservableGauge returns a new Int64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous int64 measurements once per a @@ -130,6 +136,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error) + // Float64UpDownCounter returns a new Float64UpDownCounter instrument // identified by name and configured with options. The instrument is used // to synchronously record float64 measurements during a computational @@ -139,6 +146,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error) + // Float64Histogram returns a new Float64Histogram instrument identified by // name and configured with options. The instrument is used to // synchronously record the distribution of float64 measurements during a @@ -148,6 +156,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error) + // Float64Gauge returns a new Float64Gauge instrument identified by name and // configured with options. The instrument is used to synchronously record // instantaneous float64 measurements during a computational operation. @@ -156,6 +165,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error) + // Float64ObservableCounter returns a new Float64ObservableCounter // instrument identified by name and configured with options. The // instrument is used to asynchronously record increasing float64 @@ -170,6 +180,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. 
Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error) + // Float64ObservableUpDownCounter returns a new // Float64ObservableUpDownCounter instrument identified by name and // configured with options. The instrument is used to asynchronously record @@ -184,6 +195,7 @@ type Meter interface { // See the Instrument Name section of the package documentation for more // information. Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) + // Float64ObservableGauge returns a new Float64ObservableGauge instrument // identified by name and configured with options. The instrument is used // to asynchronously record instantaneous float64 measurements once per a @@ -242,6 +254,7 @@ type Observer interface { // ObserveFloat64 records the float64 value for obsrv. ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption) + // ObserveInt64 records the int64 value for obsrv. ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index f4d1857c4f42f..f2cdf3c6518c9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -4,5 +4,6 @@ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -// Deprecated: please use Scope instead. +// +// Deprecated: use [Scope] instead. type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go index 4f553a5715356..90a4ae16c1a66 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/doc.go @@ -31,6 +31,14 @@ // is being run on. That way when multiple instances of the code are collected // at a single endpoint their origin is decipherable. // +// To avoid leaking memory, the SDK returns the same instrument for calls to +// create new instruments with the same Name, Unit, and Description. +// Importantly, callbacks provided using metric.WithFloat64Callback or +// metric.WithInt64Callback will only apply for the first instrument created +// with a given Name, Unit, and Description. Instead, use +// Meter.RegisterCallback and Registration.Unregister to add and remove +// callbacks without leaking memory. +// // See [go.opentelemetry.io/otel/metric] for more information about // the metric API. 
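The sdk/metric doc addition above explains that instruments are cached by Name, Unit, and Description, so callbacks passed via `metric.WithFloat64Callback`/`metric.WithInt64Callback` only attach to the first creation, and recommends `Meter.RegisterCallback` with `Registration.Unregister` instead. As an illustration only (the scope and instrument names below are hypothetical), a minimal sketch of that pattern:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// observeQueueDepth registers a callback-driven gauge and returns the
// Registration so the caller can remove the callback later.
func observeQueueDepth(read func() float64) (metric.Registration, error) {
	meter := otel.Meter("example/queue") // hypothetical instrumentation scope

	// No metric.WithFloat64Callback here: repeated calls with the same name,
	// unit, and description return the cached instrument, so an inline
	// callback would only ever be attached to the first one.
	depth, err := meter.Float64ObservableGauge(
		"queue.depth",
		metric.WithDescription("Current depth of the work queue."),
	)
	if err != nil {
		return nil, err
	}

	// Register the callback separately; Registration.Unregister removes it
	// without leaking observers when the instrumentation is torn down.
	return meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveFloat64(depth, read())
		return nil
	}, depth)
}

func main() {
	reg, err := observeQueueDepth(func() float64 { return 42 })
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = reg.Unregister() }()
}
```

Because the callback is registered separately from the instrument, unregistering it works even when later constructor calls return the cached instrument.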
// diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go index c9c7e8f62a908..707342408acd2 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/aggregate/exponential_histogram.go @@ -42,14 +42,14 @@ type expoHistogramDataPoint[N int64 | float64] struct { noMinMax bool noSum bool - scale int + scale int32 posBuckets expoBuckets negBuckets expoBuckets zeroCount uint64 } -func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize, maxScale int, noMinMax, noSum bool) *expoHistogramDataPoint[N] { +func newExpoHistogramDataPoint[N int64 | float64](attrs attribute.Set, maxSize int, maxScale int32, noMinMax, noSum bool) *expoHistogramDataPoint[N] { f := math.MaxFloat64 max := N(f) // if N is int64, max will overflow to -9223372036854775808 min := N(-f) @@ -119,11 +119,13 @@ func (p *expoHistogramDataPoint[N]) record(v N) { } // getBin returns the bin v should be recorded into. -func (p *expoHistogramDataPoint[N]) getBin(v float64) int { - frac, exp := math.Frexp(v) +func (p *expoHistogramDataPoint[N]) getBin(v float64) int32 { + frac, expInt := math.Frexp(v) + // 11-bit exponential. + exp := int32(expInt) // nolint: gosec if p.scale <= 0 { // Because of the choice of fraction is always 1 power of two higher than we want. - correction := 1 + var correction int32 = 1 if frac == .5 { // If v is an exact power of two the frac will be .5 and the exp // will be one higher than we want. @@ -131,7 +133,7 @@ func (p *expoHistogramDataPoint[N]) getBin(v float64) int { } return (exp - correction) >> (-p.scale) } - return exp<= bin { - low = bin - high = startBin + length - 1 + low = int(bin) + high = int(startBin) + length - 1 } - count := 0 + var count int32 for high-low >= p.maxSize { low = low >> 1 high = high >> 1 @@ -189,39 +191,39 @@ func (p *expoHistogramDataPoint[N]) scaleChange(bin, startBin, length int) int { // expoBuckets is a set of buckets in an exponential histogram. type expoBuckets struct { - startBin int + startBin int32 counts []uint64 } // record increments the count for the given bin, and expands the buckets if needed. // Size changes must be done before calling this function. -func (b *expoBuckets) record(bin int) { +func (b *expoBuckets) record(bin int32) { if len(b.counts) == 0 { b.counts = []uint64{1} b.startBin = bin return } - endBin := b.startBin + len(b.counts) - 1 + endBin := int(b.startBin) + len(b.counts) - 1 // if the new bin is inside the current range - if bin >= b.startBin && bin <= endBin { + if bin >= b.startBin && int(bin) <= endBin { b.counts[bin-b.startBin]++ return } // if the new bin is before the current start add spaces to the counts if bin < b.startBin { origLen := len(b.counts) - newLength := endBin - bin + 1 + newLength := endBin - int(bin) + 1 shift := b.startBin - bin if newLength > cap(b.counts) { b.counts = append(b.counts, make([]uint64, newLength-len(b.counts))...) 
} - copy(b.counts[shift:origLen+shift], b.counts[:]) + copy(b.counts[shift:origLen+int(shift)], b.counts[:]) b.counts = b.counts[:newLength] - for i := 1; i < shift; i++ { + for i := 1; i < int(shift); i++ { b.counts[i] = 0 } b.startBin = bin @@ -229,17 +231,17 @@ func (b *expoBuckets) record(bin int) { return } // if the new is after the end add spaces to the end - if bin > endBin { - if bin-b.startBin < cap(b.counts) { + if int(bin) > endBin { + if int(bin-b.startBin) < cap(b.counts) { b.counts = b.counts[:bin-b.startBin+1] - for i := endBin + 1 - b.startBin; i < len(b.counts); i++ { + for i := endBin + 1 - int(b.startBin); i < len(b.counts); i++ { b.counts[i] = 0 } b.counts[bin-b.startBin] = 1 return } - end := make([]uint64, bin-b.startBin-len(b.counts)+1) + end := make([]uint64, int(bin-b.startBin)-len(b.counts)+1) b.counts = append(b.counts, end...) b.counts[bin-b.startBin] = 1 } @@ -247,7 +249,7 @@ func (b *expoBuckets) record(bin int) { // downscale shrinks a bucket by a factor of 2*s. It will sum counts into the // correct lower resolution bucket. -func (b *expoBuckets) downscale(delta int) { +func (b *expoBuckets) downscale(delta int32) { // Example // delta = 2 // Original offset: -6 @@ -262,19 +264,19 @@ func (b *expoBuckets) downscale(delta int) { return } - steps := 1 << delta + steps := int32(1) << delta offset := b.startBin % steps offset = (offset + steps) % steps // to make offset positive for i := 1; i < len(b.counts); i++ { - idx := i + offset - if idx%steps == 0 { - b.counts[idx/steps] = b.counts[i] + idx := i + int(offset) + if idx%int(steps) == 0 { + b.counts[idx/int(steps)] = b.counts[i] continue } - b.counts[idx/steps] += b.counts[i] + b.counts[idx/int(steps)] += b.counts[i] } - lastIdx := (len(b.counts) - 1 + offset) / steps + lastIdx := (len(b.counts) - 1 + int(offset)) / int(steps) b.counts = b.counts[:lastIdx+1] b.startBin = b.startBin >> delta } @@ -287,7 +289,7 @@ func newExponentialHistogram[N int64 | float64](maxSize, maxScale int32, noMinMa noSum: noSum, noMinMax: noMinMax, maxSize: int(maxSize), - maxScale: int(maxScale), + maxScale: maxScale, newRes: r, limit: newLimiter[*expoHistogramDataPoint[N]](limit), @@ -303,7 +305,7 @@ type expoHistogram[N int64 | float64] struct { noSum bool noMinMax bool maxSize int - maxScale int + maxScale int32 newRes func() exemplar.FilteredReservoir[N] limit limiter[*expoHistogramDataPoint[N]] @@ -354,15 +356,15 @@ func (e *expoHistogram[N]) delta(dest *metricdata.Aggregation) int { hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count - hDPts[i].Scale = int32(val.scale) + hDPts[i].Scale = val.scale hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroThreshold = 0.0 - hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) - hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) @@ -407,15 +409,15 @@ func (e *expoHistogram[N]) cumulative(dest *metricdata.Aggregation) int { hDPts[i].StartTime = e.start hDPts[i].Time = t hDPts[i].Count = val.count - hDPts[i].Scale = int32(val.scale) + hDPts[i].Scale = val.scale 
hDPts[i].ZeroCount = val.zeroCount hDPts[i].ZeroThreshold = 0.0 - hDPts[i].PositiveBucket.Offset = int32(val.posBuckets.startBin) + hDPts[i].PositiveBucket.Offset = val.posBuckets.startBin hDPts[i].PositiveBucket.Counts = reset(hDPts[i].PositiveBucket.Counts, len(val.posBuckets.counts), len(val.posBuckets.counts)) copy(hDPts[i].PositiveBucket.Counts, val.posBuckets.counts) - hDPts[i].NegativeBucket.Offset = int32(val.negBuckets.startBin) + hDPts[i].NegativeBucket.Offset = val.negBuckets.startBin hDPts[i].NegativeBucket.Counts = reset(hDPts[i].NegativeBucket.Counts, len(val.negBuckets.counts), len(val.negBuckets.counts)) copy(hDPts[i].NegativeBucket.Counts, val.negBuckets.counts) diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go index 9daf27dc00676..1957d6b1e3a37 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/internal/exemplar/value.go @@ -42,7 +42,8 @@ func (v Value) Type() ValueType { return v.t } // Int64ValueType, 0 is returned. func (v Value) Int64() int64 { if v.t == Int64ValueType { - return int64(v.val) + // Assumes the correct int64 was stored in v.val based on type. + return int64(v.val) // nolint: gosec } return 0 } diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go index 479b7610eb16b..2309e5b2b0f8e 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/meter.go @@ -185,6 +185,11 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser // configured with options. The instrument is used to asynchronously record // int64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. +// +// If Int64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) id := Instrument{ @@ -201,6 +206,11 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 // configured with options. The instrument is used to asynchronously record // instantaneous int64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. +// +// If Int64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { cfg := metric.NewInt64ObservableGaugeConfig(options...) id := Instrument{ @@ -334,6 +344,11 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O // and configured with options. 
The instrument is used to asynchronously record // float64 measurements once per a measurement collection cycle. Only the // measurements recorded during the collection cycle are exported. +// +// If Float64ObservableUpDownCounter is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) id := Instrument{ @@ -350,6 +365,11 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl // configured with options. The instrument is used to asynchronously record // instantaneous float64 measurements once per a measurement collection cycle. // Only the measurements recorded during the collection cycle are exported. +// +// If Float64ObservableGauge is invoked repeatedly with the same Name, +// Description, and Unit, only the first set of callbacks provided are used. +// Use meter.RegisterCallback and Registration.Unregister to manage callbacks +// if instrumentation can be created multiple times with different callbacks. func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { cfg := metric.NewFloat64ObservableGaugeConfig(options...) id := Instrument{ diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go index a55f9a5372c9c..d94bdee75b731 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/reader.go @@ -34,7 +34,7 @@ var errNonPositiveDuration = fmt.Errorf("non-positive duration") // start of bi-directional control flow. // // Typically, push-based exporters that are periodic will -// implement PeroidicExporter themselves and construct a +// implement PeriodicExporter themselves and construct a // PeriodicReader to satisfy this interface. // // Pull-based exporters will typically implement Register diff --git a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go index dade0a19a2b8a..44316caa11bba 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/metric/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/metric/version.go @@ -5,5 +5,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric" // version is the current release version of the metric SDK in use. func version() string { - return "1.28.0" + return "1.29.0" } diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go index 33d065a7cb991..b7cede891c4c6 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. 
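Editor's note: the doc comments added above recommend meter.RegisterCallback and Registration.Unregister when the same observable instrument may be created more than once with different callbacks. A small usage sketch under that recommendation follows; the instrument name, the observed value, and the use of the global otel.Meter are placeholders for illustration, not part of the patch.

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example/meter")

	// Create the observable instrument without attaching a callback through
	// the constructor options, so callbacks can be managed separately.
	queueLen, err := meter.Int64ObservableUpDownCounter("example.queue.length")
	if err != nil {
		panic(err)
	}

	// Register this caller's callback explicitly; it can be removed later
	// without affecting callbacks registered by other callers of the same
	// instrument name.
	reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(queueLen, 42) // placeholder measurement
		return nil
	}, queueLen)
	if err != nil {
		panic(err)
	}

	// On teardown, detach only this callback.
	defer func() { _ = reg.Unregister() }()
}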
func Version() string { - return "1.28.0" + return "1.29.0" } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md deleted file mode 100644 index 0b6cbe960cbab..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Semconv v1.24.0 - -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.24.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.24.0) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go deleted file mode 100644 index 6e688345cbb98..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/attribute_group.go +++ /dev/null @@ -1,4387 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Describes FaaS attributes. -const ( - // FaaSInvokedNameKey is the attribute Key conforming to the - // "faas.invoked_name" semantic conventions. It represents the name of the - // invoked function. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the - // invoked function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - - // FaaSInvokedProviderKey is the attribute Key conforming to the - // "faas.invoked_provider" semantic conventions. It represents the cloud - // provider of the invoked function. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the - // invoked function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - - // FaaSInvokedRegionKey is the attribute Key conforming to the - // "faas.invoked_region" semantic conventions. It represents the cloud - // region of the invoked function. - // - // Type: string - // RequirementLevel: ConditionallyRequired (For some cloud providers, like - // AWS or GCP, the region in which a function is hosted is essential to - // uniquely identify the function and also part of its endpoint. Since it's - // part of the endpoint being called, the region is always known to - // clients. In these cases, `faas.invoked_region` MUST be set accordingly. - // If the region is unknown to the client or not required for identifying - // the invoked function, setting `faas.invoked_region` is optional.) - // Stability: experimental - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the - // invoked function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") - - // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" - // semantic conventions. It represents the type of the trigger which caused - // this function invocation. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - FaaSTriggerKey = attribute.Key("faas.trigger") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// FaaSInvokedName returns an attribute KeyValue conforming to the -// "faas.invoked_name" semantic conventions. It represents the name of the -// invoked function. -func FaaSInvokedName(val string) attribute.KeyValue { - return FaaSInvokedNameKey.String(val) -} - -// FaaSInvokedRegion returns an attribute KeyValue conforming to the -// "faas.invoked_region" semantic conventions. It represents the cloud region -// of the invoked function. -func FaaSInvokedRegion(val string) attribute.KeyValue { - return FaaSInvokedRegionKey.String(val) -} - -// Attributes for Events represented using Log Records. -const ( - // EventNameKey is the attribute Key conforming to the "event.name" - // semantic conventions. It represents the identifies the class / type of - // event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'browser.mouse.click', 'device.app.lifecycle' - // Note: Event names are subject to the same rules as [attribute - // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.26.0/specification/common/attribute-naming.md). - // Notably, event names are namespaced to avoid collisions and provide a - // clean separation of semantics for events in separate domains like - // browser, mobile, and kubernetes. - EventNameKey = attribute.Key("event.name") -) - -// EventName returns an attribute KeyValue conforming to the "event.name" -// semantic conventions. It represents the identifies the class / type of -// event. -func EventName(val string) attribute.KeyValue { - return EventNameKey.String(val) -} - -// The attributes described in this section are rather generic. They may be -// used in any Log Record they apply to. -const ( - // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid" - // semantic conventions. It represents a unique identifier for the Log - // Record. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV' - // Note: If an id is provided, other log records with the same id will be - // considered duplicates and can be removed safely. This means, that two - // distinguishable log records MUST have different values. 
- // The id MAY be an [Universally Unique Lexicographically Sortable - // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers - // (e.g. UUID) may be used as needed. - LogRecordUIDKey = attribute.Key("log.record.uid") -) - -// LogRecordUID returns an attribute KeyValue conforming to the -// "log.record.uid" semantic conventions. It represents a unique identifier for -// the Log Record. -func LogRecordUID(val string) attribute.KeyValue { - return LogRecordUIDKey.String(val) -} - -// Describes Log attributes -const ( - // LogIostreamKey is the attribute Key conforming to the "log.iostream" - // semantic conventions. It represents the stream associated with the log. - // See below for a list of well-known values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - LogIostreamKey = attribute.Key("log.iostream") -) - -var ( - // Logs from stdout stream - LogIostreamStdout = LogIostreamKey.String("stdout") - // Events from stderr stream - LogIostreamStderr = LogIostreamKey.String("stderr") -) - -// A file to which log was emitted. -const ( - // LogFileNameKey is the attribute Key conforming to the "log.file.name" - // semantic conventions. It represents the basename of the file. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'audit.log' - LogFileNameKey = attribute.Key("log.file.name") - - // LogFileNameResolvedKey is the attribute Key conforming to the - // "log.file.name_resolved" semantic conventions. It represents the - // basename of the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'uuid.log' - LogFileNameResolvedKey = attribute.Key("log.file.name_resolved") - - // LogFilePathKey is the attribute Key conforming to the "log.file.path" - // semantic conventions. It represents the full path to the file. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/log/mysql/audit.log' - LogFilePathKey = attribute.Key("log.file.path") - - // LogFilePathResolvedKey is the attribute Key conforming to the - // "log.file.path_resolved" semantic conventions. It represents the full - // path to the file, with symlinks resolved. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/var/lib/docker/uuid.log' - LogFilePathResolvedKey = attribute.Key("log.file.path_resolved") -) - -// LogFileName returns an attribute KeyValue conforming to the -// "log.file.name" semantic conventions. It represents the basename of the -// file. -func LogFileName(val string) attribute.KeyValue { - return LogFileNameKey.String(val) -} - -// LogFileNameResolved returns an attribute KeyValue conforming to the -// "log.file.name_resolved" semantic conventions. It represents the basename of -// the file, with symlinks resolved. -func LogFileNameResolved(val string) attribute.KeyValue { - return LogFileNameResolvedKey.String(val) -} - -// LogFilePath returns an attribute KeyValue conforming to the -// "log.file.path" semantic conventions. It represents the full path to the -// file. -func LogFilePath(val string) attribute.KeyValue { - return LogFilePathKey.String(val) -} - -// LogFilePathResolved returns an attribute KeyValue conforming to the -// "log.file.path_resolved" semantic conventions. It represents the full path -// to the file, with symlinks resolved. 
-func LogFilePathResolved(val string) attribute.KeyValue { - return LogFilePathResolvedKey.String(val) -} - -// Describes Database attributes -const ( - // PoolNameKey is the attribute Key conforming to the "pool.name" semantic - // conventions. It represents the name of the connection pool; unique - // within the instrumented application. In case the connection pool - // implementation doesn't provide a name, then the - // [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) - // should be used - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myDataSource' - PoolNameKey = attribute.Key("pool.name") - - // StateKey is the attribute Key conforming to the "state" semantic - // conventions. It represents the state of a connection in the pool - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'idle' - StateKey = attribute.Key("state") -) - -var ( - // idle - StateIdle = StateKey.String("idle") - // used - StateUsed = StateKey.String("used") -) - -// PoolName returns an attribute KeyValue conforming to the "pool.name" -// semantic conventions. It represents the name of the connection pool; unique -// within the instrumented application. In case the connection pool -// implementation doesn't provide a name, then the -// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes) -// should be used -func PoolName(val string) attribute.KeyValue { - return PoolNameKey.String(val) -} - -// ASP.NET Core attributes -const ( - // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to - // the "aspnetcore.diagnostics.handler.type" semantic conventions. It - // represents the full type name of the - // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) - // implementation that handled the exception. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if and only if the exception - // was handled by this handler.) - // Stability: experimental - // Examples: 'Contoso.MyHandler' - AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type") - - // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.policy" semantic conventions. It represents - // the rate limiting policy name. - // - // Type: string - // RequirementLevel: ConditionallyRequired (if the matched endpoint for the - // request had a rate-limiting policy.) - // Stability: experimental - // Examples: 'fixed', 'sliding', 'token' - AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy") - - // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the - // "aspnetcore.rate_limiting.result" semantic conventions. It represents - // the rate-limiting result, shows whether the lease was acquired or - // contains a rejection reason - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Examples: 'acquired', 'request_canceled' - AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result") - - // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the - // "aspnetcore.request.is_unhandled" semantic conventions. It represents - // the flag indicating if request was handled by the application pipeline. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (if and only if the request was - // not handled.) 
- // Stability: experimental - // Examples: True - AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled") - - // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the - // "aspnetcore.routing.is_fallback" semantic conventions. It represents a - // value that indicates whether the matched route is a fallback route. - // - // Type: boolean - // RequirementLevel: ConditionallyRequired (If and only if a route was - // successfully matched.) - // Stability: experimental - // Examples: True - AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback") -) - -var ( - // Lease was acquired - AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired") - // Lease request was rejected by the endpoint limiter - AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter") - // Lease request was rejected by the global limiter - AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter") - // Lease request was canceled - AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled") -) - -// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming -// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It -// represents the full type name of the -// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler) -// implementation that handled the exception. -func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue { - return AspnetcoreDiagnosticsHandlerTypeKey.String(val) -} - -// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to -// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents -// the rate limiting policy name. -func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue { - return AspnetcoreRateLimitingPolicyKey.String(val) -} - -// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to -// the "aspnetcore.request.is_unhandled" semantic conventions. It represents -// the flag indicating if request was handled by the application pipeline. -func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue { - return AspnetcoreRequestIsUnhandledKey.Bool(val) -} - -// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to -// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a -// value that indicates whether the matched route is a fallback route. -func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue { - return AspnetcoreRoutingIsFallbackKey.Bool(val) -} - -// SignalR attributes -const ( - // SignalrConnectionStatusKey is the attribute Key conforming to the - // "signalr.connection.status" semantic conventions. It represents the - // signalR HTTP connection closure status. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'app_shutdown', 'timeout' - SignalrConnectionStatusKey = attribute.Key("signalr.connection.status") - - // SignalrTransportKey is the attribute Key conforming to the - // "signalr.transport" semantic conventions. 
It represents the [SignalR - // transport - // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'web_sockets', 'long_polling' - SignalrTransportKey = attribute.Key("signalr.transport") -) - -var ( - // The connection was closed normally - SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure") - // The connection was closed due to a timeout - SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout") - // The connection was closed because the app is shutting down - SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown") -) - -var ( - // ServerSentEvents protocol - SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events") - // LongPolling protocol - SignalrTransportLongPolling = SignalrTransportKey.String("long_polling") - // WebSockets protocol - SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets") -) - -// Describes JVM buffer metric attributes. -const ( - // JvmBufferPoolNameKey is the attribute Key conforming to the - // "jvm.buffer.pool.name" semantic conventions. It represents the name of - // the buffer pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'mapped', 'direct' - // Note: Pool names are generally obtained via - // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()). - JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name") -) - -// JvmBufferPoolName returns an attribute KeyValue conforming to the -// "jvm.buffer.pool.name" semantic conventions. It represents the name of the -// buffer pool. -func JvmBufferPoolName(val string) attribute.KeyValue { - return JvmBufferPoolNameKey.String(val) -} - -// Describes JVM memory metric attributes. -const ( - // JvmMemoryPoolNameKey is the attribute Key conforming to the - // "jvm.memory.pool.name" semantic conventions. It represents the name of - // the memory pool. - // - // Type: string - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space' - // Note: Pool names are generally obtained via - // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()). - JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name") - - // JvmMemoryTypeKey is the attribute Key conforming to the - // "jvm.memory.type" semantic conventions. It represents the type of - // memory. - // - // Type: Enum - // RequirementLevel: Recommended - // Stability: stable - // Examples: 'heap', 'non_heap' - JvmMemoryTypeKey = attribute.Key("jvm.memory.type") -) - -var ( - // Heap memory - JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap") - // Non-heap memory - JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap") -) - -// JvmMemoryPoolName returns an attribute KeyValue conforming to the -// "jvm.memory.pool.name" semantic conventions. It represents the name of the -// memory pool. -func JvmMemoryPoolName(val string) attribute.KeyValue { - return JvmMemoryPoolNameKey.String(val) -} - -// Describes System metric attributes -const ( - // SystemDeviceKey is the attribute Key conforming to the "system.device" - // semantic conventions. 
It represents the device identifier - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '(identifier)' - SystemDeviceKey = attribute.Key("system.device") -) - -// SystemDevice returns an attribute KeyValue conforming to the -// "system.device" semantic conventions. It represents the device identifier -func SystemDevice(val string) attribute.KeyValue { - return SystemDeviceKey.String(val) -} - -// Describes System CPU metric attributes -const ( - // SystemCPULogicalNumberKey is the attribute Key conforming to the - // "system.cpu.logical_number" semantic conventions. It represents the - // logical CPU number [0..n-1] - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number") - - // SystemCPUStateKey is the attribute Key conforming to the - // "system.cpu.state" semantic conventions. It represents the state of the - // CPU - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'idle', 'interrupt' - SystemCPUStateKey = attribute.Key("system.cpu.state") -) - -var ( - // user - SystemCPUStateUser = SystemCPUStateKey.String("user") - // system - SystemCPUStateSystem = SystemCPUStateKey.String("system") - // nice - SystemCPUStateNice = SystemCPUStateKey.String("nice") - // idle - SystemCPUStateIdle = SystemCPUStateKey.String("idle") - // iowait - SystemCPUStateIowait = SystemCPUStateKey.String("iowait") - // interrupt - SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt") - // steal - SystemCPUStateSteal = SystemCPUStateKey.String("steal") -) - -// SystemCPULogicalNumber returns an attribute KeyValue conforming to the -// "system.cpu.logical_number" semantic conventions. It represents the logical -// CPU number [0..n-1] -func SystemCPULogicalNumber(val int) attribute.KeyValue { - return SystemCPULogicalNumberKey.Int(val) -} - -// Describes System Memory metric attributes -const ( - // SystemMemoryStateKey is the attribute Key conforming to the - // "system.memory.state" semantic conventions. It represents the memory - // state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free', 'cached' - SystemMemoryStateKey = attribute.Key("system.memory.state") -) - -var ( - // used - SystemMemoryStateUsed = SystemMemoryStateKey.String("used") - // free - SystemMemoryStateFree = SystemMemoryStateKey.String("free") - // shared - SystemMemoryStateShared = SystemMemoryStateKey.String("shared") - // buffers - SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers") - // cached - SystemMemoryStateCached = SystemMemoryStateKey.String("cached") -) - -// Describes System Memory Paging metric attributes -const ( - // SystemPagingDirectionKey is the attribute Key conforming to the - // "system.paging.direction" semantic conventions. It represents the paging - // access direction - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'in' - SystemPagingDirectionKey = attribute.Key("system.paging.direction") - - // SystemPagingStateKey is the attribute Key conforming to the - // "system.paging.state" semantic conventions. 
It represents the memory - // paging state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'free' - SystemPagingStateKey = attribute.Key("system.paging.state") - - // SystemPagingTypeKey is the attribute Key conforming to the - // "system.paging.type" semantic conventions. It represents the memory - // paging type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'minor' - SystemPagingTypeKey = attribute.Key("system.paging.type") -) - -var ( - // in - SystemPagingDirectionIn = SystemPagingDirectionKey.String("in") - // out - SystemPagingDirectionOut = SystemPagingDirectionKey.String("out") -) - -var ( - // used - SystemPagingStateUsed = SystemPagingStateKey.String("used") - // free - SystemPagingStateFree = SystemPagingStateKey.String("free") -) - -var ( - // major - SystemPagingTypeMajor = SystemPagingTypeKey.String("major") - // minor - SystemPagingTypeMinor = SystemPagingTypeKey.String("minor") -) - -// Describes Filesystem metric attributes -const ( - // SystemFilesystemModeKey is the attribute Key conforming to the - // "system.filesystem.mode" semantic conventions. It represents the - // filesystem mode - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'rw, ro' - SystemFilesystemModeKey = attribute.Key("system.filesystem.mode") - - // SystemFilesystemMountpointKey is the attribute Key conforming to the - // "system.filesystem.mountpoint" semantic conventions. It represents the - // filesystem mount path - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/mnt/data' - SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint") - - // SystemFilesystemStateKey is the attribute Key conforming to the - // "system.filesystem.state" semantic conventions. It represents the - // filesystem state - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'used' - SystemFilesystemStateKey = attribute.Key("system.filesystem.state") - - // SystemFilesystemTypeKey is the attribute Key conforming to the - // "system.filesystem.type" semantic conventions. It represents the - // filesystem type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ext4' - SystemFilesystemTypeKey = attribute.Key("system.filesystem.type") -) - -var ( - // used - SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used") - // free - SystemFilesystemStateFree = SystemFilesystemStateKey.String("free") - // reserved - SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved") -) - -var ( - // fat32 - SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32") - // exfat - SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat") - // ntfs - SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs") - // refs - SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs") - // hfsplus - SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus") - // ext4 - SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4") -) - -// SystemFilesystemMode returns an attribute KeyValue conforming to the -// "system.filesystem.mode" semantic conventions. 
It represents the filesystem -// mode -func SystemFilesystemMode(val string) attribute.KeyValue { - return SystemFilesystemModeKey.String(val) -} - -// SystemFilesystemMountpoint returns an attribute KeyValue conforming to -// the "system.filesystem.mountpoint" semantic conventions. It represents the -// filesystem mount path -func SystemFilesystemMountpoint(val string) attribute.KeyValue { - return SystemFilesystemMountpointKey.String(val) -} - -// Describes Network metric attributes -const ( - // SystemNetworkStateKey is the attribute Key conforming to the - // "system.network.state" semantic conventions. It represents a stateless - // protocol MUST NOT set this attribute - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'close_wait' - SystemNetworkStateKey = attribute.Key("system.network.state") -) - -var ( - // close - SystemNetworkStateClose = SystemNetworkStateKey.String("close") - // close_wait - SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait") - // closing - SystemNetworkStateClosing = SystemNetworkStateKey.String("closing") - // delete - SystemNetworkStateDelete = SystemNetworkStateKey.String("delete") - // established - SystemNetworkStateEstablished = SystemNetworkStateKey.String("established") - // fin_wait_1 - SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1") - // fin_wait_2 - SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2") - // last_ack - SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack") - // listen - SystemNetworkStateListen = SystemNetworkStateKey.String("listen") - // syn_recv - SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv") - // syn_sent - SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent") - // time_wait - SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait") -) - -// Describes System Process metric attributes -const ( - // SystemProcessesStatusKey is the attribute Key conforming to the - // "system.processes.status" semantic conventions. It represents the - // process state, e.g., [Linux Process State - // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'running' - SystemProcessesStatusKey = attribute.Key("system.processes.status") -) - -var ( - // running - SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running") - // sleeping - SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping") - // stopped - SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped") - // defunct - SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct") -) - -// These attributes may be used to describe the client in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ClientAddressKey is the attribute Key conforming to the "client.address" - // semantic conventions. 
It represents the client address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.address` SHOULD represent the client address - // behind any intermediaries, for example proxies, if it's available. - ClientAddressKey = attribute.Key("client.address") - - // ClientPortKey is the attribute Key conforming to the "client.port" - // semantic conventions. It represents the client port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - // Note: When observed from the server side, and when communicating through - // an intermediary, `client.port` SHOULD represent the client port behind - // any intermediaries, for example proxies, if it's available. - ClientPortKey = attribute.Key("client.port") -) - -// ClientAddress returns an attribute KeyValue conforming to the -// "client.address" semantic conventions. It represents the client address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func ClientAddress(val string) attribute.KeyValue { - return ClientAddressKey.String(val) -} - -// ClientPort returns an attribute KeyValue conforming to the "client.port" -// semantic conventions. It represents the client port number. -func ClientPort(val int) attribute.KeyValue { - return ClientPortKey.Int(val) -} - -// The attributes used to describe telemetry in the context of databases. -const ( - // DBCassandraConsistencyLevelKey is the attribute Key conforming to the - // "db.cassandra.consistency_level" semantic conventions. It represents the - // consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - - // DBCassandraCoordinatorDCKey is the attribute Key conforming to the - // "db.cassandra.coordinator.dc" semantic conventions. It represents the - // data center of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") - - // DBCassandraCoordinatorIDKey is the attribute Key conforming to the - // "db.cassandra.coordinator.id" semantic conventions. It represents the ID - // of the coordinating node for a query. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - - // DBCassandraIdempotenceKey is the attribute Key conforming to the - // "db.cassandra.idempotence" semantic conventions. It represents the - // whether or not the query is idempotent. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - - // DBCassandraPageSizeKey is the attribute Key conforming to the - // "db.cassandra.page_size" semantic conventions. It represents the fetch - // size used for paging, i.e. 
how many rows will be returned at once. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - - // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming - // to the "db.cassandra.speculative_execution_count" semantic conventions. - // It represents the number of times a query was speculatively executed. - // Not set or `0` if the query was not executed speculatively. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - - // DBCassandraTableKey is the attribute Key conforming to the - // "db.cassandra.table" semantic conventions. It represents the name of the - // primary Cassandra table that the operation is acting upon, including the - // keyspace name (if applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra - // rather than sql. It is not recommended to attempt any client-side - // parsing of `db.statement` just to get this property, but it should be - // set if it is provided by the library being instrumented. If the - // operation is acting upon an anonymous table, or more than one table, - // this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - - // DBConnectionStringKey is the attribute Key conforming to the - // "db.connection_string" semantic conventions. It represents the - // connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - - // DBCosmosDBClientIDKey is the attribute Key conforming to the - // "db.cosmosdb.client_id" semantic conventions. It represents the unique - // Cosmos client instance id. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d' - DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id") - - // DBCosmosDBConnectionModeKey is the attribute Key conforming to the - // "db.cosmosdb.connection_mode" semantic conventions. It represents the - // cosmos client connection mode. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode") - - // DBCosmosDBContainerKey is the attribute Key conforming to the - // "db.cosmosdb.container" semantic conventions. It represents the cosmos - // DB container name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'anystring' - DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container") - - // DBCosmosDBOperationTypeKey is the attribute Key conforming to the - // "db.cosmosdb.operation_type" semantic conventions. It represents the - // cosmosDB Operation Type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type") - - // DBCosmosDBRequestChargeKey is the attribute Key conforming to the - // "db.cosmosdb.request_charge" semantic conventions. 
It represents the rU - // consumed for that operation - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 46.18, 1.0 - DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge") - - // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the - // "db.cosmosdb.request_content_length" semantic conventions. It represents - // the request payload size in bytes - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length") - - // DBCosmosDBStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos - // DB status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 200, 201 - DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code") - - // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the - // "db.cosmosdb.sub_status_code" semantic conventions. It represents the - // cosmos DB sub status code. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1000, 1002 - DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code") - - // DBElasticsearchClusterNameKey is the attribute Key conforming to the - // "db.elasticsearch.cluster.name" semantic conventions. It represents the - // represents the identifier of an Elasticsearch cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f' - DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name") - - // DBElasticsearchNodeNameKey is the attribute Key conforming to the - // "db.elasticsearch.node.name" semantic conventions. It represents the - // represents the human-readable identifier of the node/instance to which a - // request was routed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-0000000001' - DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name") - - // DBInstanceIDKey is the attribute Key conforming to the "db.instance.id" - // semantic conventions. It represents an identifier (address, unique name, - // or any other identifier) of the database instance that is executing - // queries or mutations on the current connection. This is useful in cases - // where the database is running in a clustered environment and the - // instrumentation is able to record the node executing the query. The - // client may obtain this value in databases like MySQL using queries like - // `select @@hostname`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mysql-e26b99z.example.com' - DBInstanceIDKey = attribute.Key("db.instance.id") - - // DBJDBCDriverClassnameKey is the attribute Key conforming to the - // "db.jdbc.driver_classname" semantic conventions. It represents the - // fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) - // driver used to connect. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - - // DBMongoDBCollectionKey is the attribute Key conforming to the - // "db.mongodb.collection" semantic conventions. It represents the MongoDB - // collection being accessed within the database stated in `db.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") - - // DBMSSQLInstanceNameKey is the attribute Key conforming to the - // "db.mssql.instance_name" semantic conventions. It represents the - // Microsoft SQL Server [instance - // name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named - // instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `server.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") - - // DBNameKey is the attribute Key conforming to the "db.name" semantic - // conventions. It represents the this attribute is used to report the name - // of the database being accessed. For commands that switch the database, - // this should be set to the target database (even if the command fails). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called - // "schema name". In case there are multiple layers that could be - // considered for database name (e.g. Oracle instance name and schema - // name), the database name to be used is the more specific layer (e.g. - // Oracle schema name). - DBNameKey = attribute.Key("db.name") - - // DBOperationKey is the attribute Key conforming to the "db.operation" - // semantic conventions. It represents the name of the operation being - // executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to - // attempt any client-side parsing of `db.statement` just to get this - // property, but it should be set if the operation name is provided by the - // library being instrumented. If the SQL statement has an ambiguous - // operation, or performs more than one operation, this value may be - // omitted. - DBOperationKey = attribute.Key("db.operation") - - // DBRedisDBIndexKey is the attribute Key conforming to the - // "db.redis.database_index" semantic conventions. It represents the index - // of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To - // be used instead of the generic `db.name` attribute. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") - - // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" - // semantic conventions. It represents the name of the primary table that - // the operation is acting upon, including the database name (if - // applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting - // upon an anonymous table, or more than one table, this value MUST NOT be - // set. - DBSQLTableKey = attribute.Key("db.sql.table") - - // DBStatementKey is the attribute Key conforming to the "db.statement" - // semantic conventions. It represents the database statement being - // executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - DBStatementKey = attribute.Key("db.statement") - - // DBSystemKey is the attribute Key conforming to the "db.system" semantic - // conventions. It represents an identifier for the database management - // system (DBMS) product being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - DBSystemKey = attribute.Key("db.system") - - // DBUserKey is the attribute Key conforming to the "db.user" semantic - // conventions. It represents the username for accessing the database. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -var ( - // Gateway (HTTP) connections mode - DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway") - // Direct connection - DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct") -) - -var ( - // invalid - DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid") - // create - DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create") - // patch - DBCosmosDBOperationTypePatch = 
DBCosmosDBOperationTypeKey.String("Patch") - // read - DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read") - // read_feed - DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed") - // delete - DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete") - // replace - DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace") - // execute - DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute") - // query - DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query") - // head - DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head") - // head_feed - DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed") - // upsert - DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert") - // batch - DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch") - // query_plan - DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan") - // execute_javascript - DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript") -) - -var ( - // Some other SQL database. Fallback only. See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // Microsoft SQL Server Compact - DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = 
DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") - // OpenSearch - DBSystemOpensearch = DBSystemKey.String("opensearch") - // ClickHouse - DBSystemClickhouse = DBSystemKey.String("clickhouse") - // Cloud Spanner - DBSystemSpanner = DBSystemKey.String("spanner") - // Trino - DBSystemTrino = DBSystemKey.String("trino") -) - -// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.dc" semantic conventions. It represents the data -// center of the coordinating node for a query. -func DBCassandraCoordinatorDC(val string) attribute.KeyValue { - return DBCassandraCoordinatorDCKey.String(val) -} - -// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the -// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of -// the coordinating node for a query. -func DBCassandraCoordinatorID(val string) attribute.KeyValue { - return DBCassandraCoordinatorIDKey.String(val) -} - -// DBCassandraIdempotence returns an attribute KeyValue conforming to the -// "db.cassandra.idempotence" semantic conventions. It represents the whether -// or not the query is idempotent. -func DBCassandraIdempotence(val bool) attribute.KeyValue { - return DBCassandraIdempotenceKey.Bool(val) -} - -// DBCassandraPageSize returns an attribute KeyValue conforming to the -// "db.cassandra.page_size" semantic conventions. It represents the fetch size -// used for paging, i.e. how many rows will be returned at once. -func DBCassandraPageSize(val int) attribute.KeyValue { - return DBCassandraPageSizeKey.Int(val) -} - -// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue -// conforming to the "db.cassandra.speculative_execution_count" semantic -// conventions. It represents the number of times a query was speculatively -// executed. Not set or `0` if the query was not executed speculatively. -func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue { - return DBCassandraSpeculativeExecutionCountKey.Int(val) -} - -// DBCassandraTable returns an attribute KeyValue conforming to the -// "db.cassandra.table" semantic conventions. It represents the name of the -// primary Cassandra table that the operation is acting upon, including the -// keyspace name (if applicable). -func DBCassandraTable(val string) attribute.KeyValue { - return DBCassandraTableKey.String(val) -} - -// DBConnectionString returns an attribute KeyValue conforming to the -// "db.connection_string" semantic conventions. It represents the connection -// string used to connect to the database. It is recommended to remove embedded -// credentials. 
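
// Illustrative sketch (not part of the vendored diff): one way the DB attribute
// helpers defined in this file could be attached to a database client span.
// The semconv import path/version, the tracer name, and the queryUsers/run
// names are assumptions for the example; an SDK-configured TracerProvider is
// presumed.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version path for the package in this diff
	"go.opentelemetry.io/otel/trace"
)

func queryUsers(ctx context.Context, run func(context.Context) error) error {
	tracer := otel.Tracer("db-example") // assumes a configured TracerProvider
	ctx, span := tracer.Start(ctx, "SELECT shop.users",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemMySQL,       // db.system enum value from the list above
			semconv.DBName("shop"),      // db.name
			semconv.DBSQLTable("users"), // db.sql.table, set when supplied by the client library
			semconv.DBStatement("SELECT * FROM users WHERE id = ?"), // db.statement
			semconv.DBUser("readonly_user"),                         // db.user
		),
	)
	defer span.End()
	return run(ctx)
}
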
-func DBConnectionString(val string) attribute.KeyValue { - return DBConnectionStringKey.String(val) -} - -// DBCosmosDBClientID returns an attribute KeyValue conforming to the -// "db.cosmosdb.client_id" semantic conventions. It represents the unique -// Cosmos client instance id. -func DBCosmosDBClientID(val string) attribute.KeyValue { - return DBCosmosDBClientIDKey.String(val) -} - -// DBCosmosDBContainer returns an attribute KeyValue conforming to the -// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB -// container name. -func DBCosmosDBContainer(val string) attribute.KeyValue { - return DBCosmosDBContainerKey.String(val) -} - -// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the -// "db.cosmosdb.request_charge" semantic conventions. It represents the rU -// consumed for that operation -func DBCosmosDBRequestCharge(val float64) attribute.KeyValue { - return DBCosmosDBRequestChargeKey.Float64(val) -} - -// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming -// to the "db.cosmosdb.request_content_length" semantic conventions. It -// represents the request payload size in bytes -func DBCosmosDBRequestContentLength(val int) attribute.KeyValue { - return DBCosmosDBRequestContentLengthKey.Int(val) -} - -// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB -// status code. -func DBCosmosDBStatusCode(val int) attribute.KeyValue { - return DBCosmosDBStatusCodeKey.Int(val) -} - -// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the -// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos -// DB sub status code. -func DBCosmosDBSubStatusCode(val int) attribute.KeyValue { - return DBCosmosDBSubStatusCodeKey.Int(val) -} - -// DBElasticsearchClusterName returns an attribute KeyValue conforming to -// the "db.elasticsearch.cluster.name" semantic conventions. It represents the -// represents the identifier of an Elasticsearch cluster. -func DBElasticsearchClusterName(val string) attribute.KeyValue { - return DBElasticsearchClusterNameKey.String(val) -} - -// DBElasticsearchNodeName returns an attribute KeyValue conforming to the -// "db.elasticsearch.node.name" semantic conventions. It represents the -// represents the human-readable identifier of the node/instance to which a -// request was routed. -func DBElasticsearchNodeName(val string) attribute.KeyValue { - return DBElasticsearchNodeNameKey.String(val) -} - -// DBInstanceID returns an attribute KeyValue conforming to the -// "db.instance.id" semantic conventions. It represents an identifier (address, -// unique name, or any other identifier) of the database instance that is -// executing queries or mutations on the current connection. This is useful in -// cases where the database is running in a clustered environment and the -// instrumentation is able to record the node executing the query. The client -// may obtain this value in databases like MySQL using queries like `select -// @@hostname`. -func DBInstanceID(val string) attribute.KeyValue { - return DBInstanceIDKey.String(val) -} - -// DBJDBCDriverClassname returns an attribute KeyValue conforming to the -// "db.jdbc.driver_classname" semantic conventions. It represents the -// fully-qualified class name of the [Java Database Connectivity -// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver -// used to connect. 
-func DBJDBCDriverClassname(val string) attribute.KeyValue { - return DBJDBCDriverClassnameKey.String(val) -} - -// DBMongoDBCollection returns an attribute KeyValue conforming to the -// "db.mongodb.collection" semantic conventions. It represents the MongoDB -// collection being accessed within the database stated in `db.name`. -func DBMongoDBCollection(val string) attribute.KeyValue { - return DBMongoDBCollectionKey.String(val) -} - -// DBMSSQLInstanceName returns an attribute KeyValue conforming to the -// "db.mssql.instance_name" semantic conventions. It represents the Microsoft -// SQL Server [instance -// name](https://docs.microsoft.com/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) -// connecting to. This name is used to determine the port of a named instance. -func DBMSSQLInstanceName(val string) attribute.KeyValue { - return DBMSSQLInstanceNameKey.String(val) -} - -// DBName returns an attribute KeyValue conforming to the "db.name" semantic -// conventions. It represents the this attribute is used to report the name of -// the database being accessed. For commands that switch the database, this -// should be set to the target database (even if the command fails). -func DBName(val string) attribute.KeyValue { - return DBNameKey.String(val) -} - -// DBOperation returns an attribute KeyValue conforming to the -// "db.operation" semantic conventions. It represents the name of the operation -// being executed, e.g. the [MongoDB command -// name](https://docs.mongodb.com/manual/reference/command/#database-operations) -// such as `findAndModify`, or the SQL keyword. -func DBOperation(val string) attribute.KeyValue { - return DBOperationKey.String(val) -} - -// DBRedisDBIndex returns an attribute KeyValue conforming to the -// "db.redis.database_index" semantic conventions. It represents the index of -// the database being accessed as used in the [`SELECT` -// command](https://redis.io/commands/select), provided as an integer. To be -// used instead of the generic `db.name` attribute. -func DBRedisDBIndex(val int) attribute.KeyValue { - return DBRedisDBIndexKey.Int(val) -} - -// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" -// semantic conventions. It represents the name of the primary table that the -// operation is acting upon, including the database name (if applicable). -func DBSQLTable(val string) attribute.KeyValue { - return DBSQLTableKey.String(val) -} - -// DBStatement returns an attribute KeyValue conforming to the -// "db.statement" semantic conventions. It represents the database statement -// being executed. -func DBStatement(val string) attribute.KeyValue { - return DBStatementKey.String(val) -} - -// DBUser returns an attribute KeyValue conforming to the "db.user" semantic -// conventions. It represents the username for accessing the database. -func DBUser(val string) attribute.KeyValue { - return DBUserKey.String(val) -} - -// Describes deprecated HTTP attributes. -const ( - // HTTPFlavorKey is the attribute Key conforming to the "http.flavor" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorKey = attribute.Key("http.flavor") - - // HTTPMethodKey is the attribute Key conforming to the "http.method" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'GET', 'POST', 'HEAD' - // Deprecated: use `http.request.method` instead. 
- HTTPMethodKey = attribute.Key("http.method") - - // HTTPRequestContentLengthKey is the attribute Key conforming to the - // "http.request_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.request.header.content-length` instead. - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - - // HTTPResponseContentLengthKey is the attribute Key conforming to the - // "http.response_content_length" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 3495 - // Deprecated: use `http.response.header.content-length` instead. - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - - // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'http', 'https' - // Deprecated: use `url.scheme` instead. - HTTPSchemeKey = attribute.Key("http.scheme") - - // HTTPStatusCodeKey is the attribute Key conforming to the - // "http.status_code" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 200 - // Deprecated: use `http.response.status_code` instead. - HTTPStatusCodeKey = attribute.Key("http.status_code") - - // HTTPTargetKey is the attribute Key conforming to the "http.target" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.path` and `url.query` instead. - HTTPTargetKey = attribute.Key("http.target") - - // HTTPURLKey is the attribute Key conforming to the "http.url" semantic - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Deprecated: use `url.full` instead. - HTTPURLKey = attribute.Key("http.url") - - // HTTPUserAgentKey is the attribute Key conforming to the - // "http.user_agent" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - // Deprecated: use `user_agent.original` instead. - HTTPUserAgentKey = attribute.Key("http.user_agent") -) - -var ( - // HTTP/1.0 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - // - // Deprecated: use `network.protocol.name` instead. - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// HTTPMethod returns an attribute KeyValue conforming to the "http.method" -// semantic conventions. -// -// Deprecated: use `http.request.method` instead. 
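
// Illustrative sketch (not from the upstream module): while the deprecated
// http.* keys above still exist, an instrumentation migrating to the stable
// conventions might emit both sets for a transition period. The import path
// and the clientHTTPAttrs helper are assumptions; HTTPRequestMethodGet and
// HTTPResponseStatusCode are the replacements defined later in this file.
package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version path
)

func clientHTTPAttrs(statusCode int) []attribute.KeyValue {
	return []attribute.KeyValue{
		// Stable replacements (defined later in this file).
		semconv.HTTPRequestMethodGet,
		semconv.HTTPResponseStatusCode(statusCode),
		// Deprecated equivalents, kept only until consumers have migrated.
		semconv.HTTPMethod("GET"),
		semconv.HTTPStatusCode(statusCode),
	}
}
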
-func HTTPMethod(val string) attribute.KeyValue { - return HTTPMethodKey.String(val) -} - -// HTTPRequestContentLength returns an attribute KeyValue conforming to the -// "http.request_content_length" semantic conventions. -// -// Deprecated: use `http.request.header.content-length` instead. -func HTTPRequestContentLength(val int) attribute.KeyValue { - return HTTPRequestContentLengthKey.Int(val) -} - -// HTTPResponseContentLength returns an attribute KeyValue conforming to the -// "http.response_content_length" semantic conventions. -// -// Deprecated: use `http.response.header.content-length` instead. -func HTTPResponseContentLength(val int) attribute.KeyValue { - return HTTPResponseContentLengthKey.Int(val) -} - -// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" -// semantic conventions. -// -// Deprecated: use `url.scheme` instead. -func HTTPScheme(val string) attribute.KeyValue { - return HTTPSchemeKey.String(val) -} - -// HTTPStatusCode returns an attribute KeyValue conforming to the -// "http.status_code" semantic conventions. -// -// Deprecated: use `http.response.status_code` instead. -func HTTPStatusCode(val int) attribute.KeyValue { - return HTTPStatusCodeKey.Int(val) -} - -// HTTPTarget returns an attribute KeyValue conforming to the "http.target" -// semantic conventions. -// -// Deprecated: use `url.path` and `url.query` instead. -func HTTPTarget(val string) attribute.KeyValue { - return HTTPTargetKey.String(val) -} - -// HTTPURL returns an attribute KeyValue conforming to the "http.url" -// semantic conventions. -// -// Deprecated: use `url.full` instead. -func HTTPURL(val string) attribute.KeyValue { - return HTTPURLKey.String(val) -} - -// HTTPUserAgent returns an attribute KeyValue conforming to the -// "http.user_agent" semantic conventions. -// -// Deprecated: use `user_agent.original` instead. -func HTTPUserAgent(val string) attribute.KeyValue { - return HTTPUserAgentKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetHostNameKey is the attribute Key conforming to the "net.host.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address`. - NetHostNameKey = attribute.Key("net.host.name") - - // NetHostPortKey is the attribute Key conforming to the "net.host.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port`. - NetHostPortKey = attribute.Key("net.host.port") - - // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" - // semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'example.com' - // Deprecated: use `server.address` on client spans and `client.address` on - // server spans. - NetPeerNameKey = attribute.Key("net.peer.name") - - // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" - // semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `server.port` on client spans and `client.port` on - // server spans. - NetPeerPortKey = attribute.Key("net.peer.port") - - // NetProtocolNameKey is the attribute Key conforming to the - // "net.protocol.name" semantic conventions. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'amqp', 'http', 'mqtt' - // Deprecated: use `network.protocol.name`. - NetProtocolNameKey = attribute.Key("net.protocol.name") - - // NetProtocolVersionKey is the attribute Key conforming to the - // "net.protocol.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '3.1.1' - // Deprecated: use `network.protocol.version`. - NetProtocolVersionKey = attribute.Key("net.protocol.version") - - // NetSockFamilyKey is the attribute Key conforming to the - // "net.sock.family" semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyKey = attribute.Key("net.sock.family") - - // NetSockHostAddrKey is the attribute Key conforming to the - // "net.sock.host.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: use `network.local.address`. - NetSockHostAddrKey = attribute.Key("net.sock.host.addr") - - // NetSockHostPortKey is the attribute Key conforming to the - // "net.sock.host.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 8080 - // Deprecated: use `network.local.port`. - NetSockHostPortKey = attribute.Key("net.sock.host.port") - - // NetSockPeerAddrKey is the attribute Key conforming to the - // "net.sock.peer.addr" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '192.168.0.1' - // Deprecated: use `network.peer.address`. - NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") - - // NetSockPeerNameKey is the attribute Key conforming to the - // "net.sock.peer.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '/var/my.sock' - // Deprecated: no replacement at this time. - NetSockPeerNameKey = attribute.Key("net.sock.peer.name") - - // NetSockPeerPortKey is the attribute Key conforming to the - // "net.sock.peer.port" semantic conventions. - // - // Type: int - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 65531 - // Deprecated: use `network.peer.port`. - NetSockPeerPortKey = attribute.Key("net.sock.peer.port") - - // NetTransportKey is the attribute Key conforming to the "net.transport" - // semantic conventions. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: deprecated - // Deprecated: use `network.transport`. - NetTransportKey = attribute.Key("net.transport") -) - -var ( - // IPv4 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet = NetSockFamilyKey.String("inet") - // IPv6 address - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") - // Unix domain socket path - // - // Deprecated: use `network.transport` and `network.type`. - NetSockFamilyUnix = NetSockFamilyKey.String("unix") -) - -var ( - // ip_tcp - // - // Deprecated: use `network.transport`. - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - // - // Deprecated: use `network.transport`. - NetTransportUDP = NetTransportKey.String("ip_udp") - // Named or anonymous pipe - // - // Deprecated: use `network.transport`. 
- NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - // - // Deprecated: use `network.transport`. - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - // - // Deprecated: use `network.transport`. - NetTransportOther = NetTransportKey.String("other") -) - -// NetHostName returns an attribute KeyValue conforming to the -// "net.host.name" semantic conventions. -// -// Deprecated: use `server.address`. -func NetHostName(val string) attribute.KeyValue { - return NetHostNameKey.String(val) -} - -// NetHostPort returns an attribute KeyValue conforming to the -// "net.host.port" semantic conventions. -// -// Deprecated: use `server.port`. -func NetHostPort(val int) attribute.KeyValue { - return NetHostPortKey.Int(val) -} - -// NetPeerName returns an attribute KeyValue conforming to the -// "net.peer.name" semantic conventions. -// -// Deprecated: use `server.address` on client spans and `client.address` on -// server spans. -func NetPeerName(val string) attribute.KeyValue { - return NetPeerNameKey.String(val) -} - -// NetPeerPort returns an attribute KeyValue conforming to the -// "net.peer.port" semantic conventions. -// -// Deprecated: use `server.port` on client spans and `client.port` on server -// spans. -func NetPeerPort(val int) attribute.KeyValue { - return NetPeerPortKey.Int(val) -} - -// NetProtocolName returns an attribute KeyValue conforming to the -// "net.protocol.name" semantic conventions. -// -// Deprecated: use `network.protocol.name`. -func NetProtocolName(val string) attribute.KeyValue { - return NetProtocolNameKey.String(val) -} - -// NetProtocolVersion returns an attribute KeyValue conforming to the -// "net.protocol.version" semantic conventions. -// -// Deprecated: use `network.protocol.version`. -func NetProtocolVersion(val string) attribute.KeyValue { - return NetProtocolVersionKey.String(val) -} - -// NetSockHostAddr returns an attribute KeyValue conforming to the -// "net.sock.host.addr" semantic conventions. -// -// Deprecated: use `network.local.address`. -func NetSockHostAddr(val string) attribute.KeyValue { - return NetSockHostAddrKey.String(val) -} - -// NetSockHostPort returns an attribute KeyValue conforming to the -// "net.sock.host.port" semantic conventions. -// -// Deprecated: use `network.local.port`. -func NetSockHostPort(val int) attribute.KeyValue { - return NetSockHostPortKey.Int(val) -} - -// NetSockPeerAddr returns an attribute KeyValue conforming to the -// "net.sock.peer.addr" semantic conventions. -// -// Deprecated: use `network.peer.address`. -func NetSockPeerAddr(val string) attribute.KeyValue { - return NetSockPeerAddrKey.String(val) -} - -// NetSockPeerName returns an attribute KeyValue conforming to the -// "net.sock.peer.name" semantic conventions. -// -// Deprecated: no replacement at this time. -func NetSockPeerName(val string) attribute.KeyValue { - return NetSockPeerNameKey.String(val) -} - -// NetSockPeerPort returns an attribute KeyValue conforming to the -// "net.sock.peer.port" semantic conventions. -// -// Deprecated: use `network.peer.port`. -func NetSockPeerPort(val int) attribute.KeyValue { - return NetSockPeerPortKey.Int(val) -} - -// These attributes may be used to describe the receiver of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. 
packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // DestinationAddressKey is the attribute Key conforming to the - // "destination.address" semantic conventions. It represents the - // destination address - domain name if available without reverse DNS - // lookup; otherwise, IP address or Unix domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the source side, and when communicating through - // an intermediary, `destination.address` SHOULD represent the destination - // address behind any intermediaries, for example proxies, if it's - // available. - DestinationAddressKey = attribute.Key("destination.address") - - // DestinationPortKey is the attribute Key conforming to the - // "destination.port" semantic conventions. It represents the destination - // port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - DestinationPortKey = attribute.Key("destination.port") -) - -// DestinationAddress returns an attribute KeyValue conforming to the -// "destination.address" semantic conventions. It represents the destination -// address - domain name if available without reverse DNS lookup; otherwise, IP -// address or Unix domain socket name. -func DestinationAddress(val string) attribute.KeyValue { - return DestinationAddressKey.String(val) -} - -// DestinationPort returns an attribute KeyValue conforming to the -// "destination.port" semantic conventions. It represents the destination port -// number -func DestinationPort(val int) attribute.KeyValue { - return DestinationPortKey.Int(val) -} - -// These attributes may be used for any disk related operation. -const ( - // DiskIoDirectionKey is the attribute Key conforming to the - // "disk.io.direction" semantic conventions. It represents the disk IO - // operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read' - DiskIoDirectionKey = attribute.Key("disk.io.direction") -) - -var ( - // read - DiskIoDirectionRead = DiskIoDirectionKey.String("read") - // write - DiskIoDirectionWrite = DiskIoDirectionKey.String("write") -) - -// The shared attributes used to report an error. -const ( - // ErrorTypeKey is the attribute Key conforming to the "error.type" - // semantic conventions. It represents the describes a class of error the - // operation ended with. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'timeout', 'java.net.UnknownHostException', - // 'server_certificate_invalid', '500' - // Note: The `error.type` SHOULD be predictable and SHOULD have low - // cardinality. - // Instrumentations SHOULD document the list of errors they report. - // - // The cardinality of `error.type` within one instrumentation library - // SHOULD be low. - // Telemetry consumers that aggregate data from multiple instrumentation - // libraries and applications - // should be prepared for `error.type` to have high cardinality at query - // time when no - // additional filters are applied. - // - // If the operation has completed successfully, instrumentations SHOULD NOT - // set `error.type`. 
- // - // If a specific domain defines its own set of error identifiers (such as - // HTTP or gRPC status codes), - // it's RECOMMENDED to: - // - // * Use a domain-specific attribute - // * Set `error.type` to capture all errors, regardless of whether they are - // defined within the domain-specific set or not. - ErrorTypeKey = attribute.Key("error.type") -) - -var ( - // A fallback error value to be used when the instrumentation doesn't define a custom value - ErrorTypeOther = ErrorTypeKey.String("_OTHER") -) - -// The shared attributes used to report a single exception associated with a -// span or log. -const ( - // ExceptionEscapedKey is the attribute Key conforming to the - // "exception.escaped" semantic conventions. It represents the sHOULD be - // set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: An exception is considered to have escaped (or left) the scope of - // a span, - // if that span is ended while the exception is still logically "in - // flight". - // This may be actually "in flight" in some languages (e.g. if the - // exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most - // languages. - // - // It is usually not possible to determine at the point where an exception - // is thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending - // the span, - // as done in the [example for recording span - // exceptions](#recording-an-exception). - // - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") - - // ExceptionMessageKey is the attribute Key conforming to the - // "exception.message" semantic conventions. It represents the exception - // message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Division by zero', "Can't convert 'int' object to str - // implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - - // ExceptionStacktraceKey is the attribute Key conforming to the - // "exception.stacktrace" semantic conventions. It represents a stacktrace - // as a string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - - // ExceptionTypeKey is the attribute Key conforming to the "exception.type" - // semantic conventions. It represents the type of the exception (its - // fully-qualified class name, if applicable). 
The dynamic type of the - // exception should be preferred over the static type in languages that - // support it. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") -) - -// ExceptionEscaped returns an attribute KeyValue conforming to the -// "exception.escaped" semantic conventions. It represents the sHOULD be set to -// true if the exception event is recorded at a point where it is known that -// the exception is escaping the scope of the span. -func ExceptionEscaped(val bool) attribute.KeyValue { - return ExceptionEscapedKey.Bool(val) -} - -// ExceptionMessage returns an attribute KeyValue conforming to the -// "exception.message" semantic conventions. It represents the exception -// message. -func ExceptionMessage(val string) attribute.KeyValue { - return ExceptionMessageKey.String(val) -} - -// ExceptionStacktrace returns an attribute KeyValue conforming to the -// "exception.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func ExceptionStacktrace(val string) attribute.KeyValue { - return ExceptionStacktraceKey.String(val) -} - -// ExceptionType returns an attribute KeyValue conforming to the -// "exception.type" semantic conventions. It represents the type of the -// exception (its fully-qualified class name, if applicable). The dynamic type -// of the exception should be preferred over the static type in languages that -// support it. -func ExceptionType(val string) attribute.KeyValue { - return ExceptionTypeKey.String(val) -} - -// Semantic convention attributes in the HTTP namespace. -const ( - // HTTPRequestBodySizeKey is the attribute Key conforming to the - // "http.request.body.size" semantic conventions. It represents the size of - // the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPRequestBodySizeKey = attribute.Key("http.request.body.size") - - // HTTPRequestMethodKey is the attribute Key conforming to the - // "http.request.method" semantic conventions. It represents the hTTP - // request method. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - // Note: HTTP request method value SHOULD be "known" to the - // instrumentation. - // By default, this convention defines "known" methods as the ones listed - // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods) - // and the PATCH method defined in - // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html). - // - // If the HTTP request method is not known to instrumentation, it MUST set - // the `http.request.method` attribute to `_OTHER`. - // - // If the HTTP instrumentation could end up converting valid HTTP request - // methods to `_OTHER`, then it MUST provide a way to override - // the list of known HTTP methods. 
If this override is done via environment - // variable, then the environment variable MUST be named - // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated - // list of case-sensitive known HTTP methods - // (this list MUST be a full override of the default known method, it is - // not a list of known methods in addition to the defaults). - // - // HTTP method names are case-sensitive and `http.request.method` attribute - // value MUST match a known HTTP method name exactly. - // Instrumentations for specific web frameworks that consider HTTP methods - // to be case insensitive, SHOULD populate a canonical equivalent. - // Tracing instrumentations that do so, MUST also set - // `http.request.method_original` to the original value. - HTTPRequestMethodKey = attribute.Key("http.request.method") - - // HTTPRequestMethodOriginalKey is the attribute Key conforming to the - // "http.request.method_original" semantic conventions. It represents the - // original HTTP method sent by the client in the request line. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'GeT', 'ACL', 'foo' - HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original") - - // HTTPRequestResendCountKey is the attribute Key conforming to the - // "http.request.resend_count" semantic conventions. It represents the - // ordinal number of request resending attempt (for any reason, including - // redirects). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 3 - // Note: The resend count SHOULD be updated each time an HTTP request gets - // resent by the client, regardless of what was the cause of the resending - // (e.g. redirection, authorization failure, 503 Server Unavailable, - // network issues, or any other). - HTTPRequestResendCountKey = attribute.Key("http.request.resend_count") - - // HTTPResponseBodySizeKey is the attribute Key conforming to the - // "http.response.body.size" semantic conventions. It represents the size - // of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as - // the - // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) - // header. For requests using transport encoding, this should be the - // compressed size. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3495 - HTTPResponseBodySizeKey = attribute.Key("http.response.body.size") - - // HTTPResponseStatusCodeKey is the attribute Key conforming to the - // "http.response.status_code" semantic conventions. It represents the - // [HTTP response status - // code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 200 - HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code") - - // HTTPRouteKey is the attribute Key conforming to the "http.route" - // semantic conventions. It represents the matched route, that is, the path - // template in the format used by the respective server framework. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/users/:userID?', '{controller}/{action}/{id?}' - // Note: MUST NOT be populated when this is not supported by the HTTP - // server framework as the route attribute should have low-cardinality and - // the URI path can NOT substitute it. 
- // SHOULD include the [application - // root](/docs/http/http-spans.md#http-server-definitions) if there is one. - HTTPRouteKey = attribute.Key("http.route") -) - -var ( - // CONNECT method - HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT") - // DELETE method - HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE") - // GET method - HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET") - // HEAD method - HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD") - // OPTIONS method - HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS") - // PATCH method - HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH") - // POST method - HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST") - // PUT method - HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT") - // TRACE method - HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE") - // Any HTTP method that the instrumentation has no prior knowledge of - HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER") -) - -// HTTPRequestBodySize returns an attribute KeyValue conforming to the -// "http.request.body.size" semantic conventions. It represents the size of the -// request payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPRequestBodySize(val int) attribute.KeyValue { - return HTTPRequestBodySizeKey.Int(val) -} - -// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the -// "http.request.method_original" semantic conventions. It represents the -// original HTTP method sent by the client in the request line. -func HTTPRequestMethodOriginal(val string) attribute.KeyValue { - return HTTPRequestMethodOriginalKey.String(val) -} - -// HTTPRequestResendCount returns an attribute KeyValue conforming to the -// "http.request.resend_count" semantic conventions. It represents the ordinal -// number of request resending attempt (for any reason, including redirects). -func HTTPRequestResendCount(val int) attribute.KeyValue { - return HTTPRequestResendCountKey.Int(val) -} - -// HTTPResponseBodySize returns an attribute KeyValue conforming to the -// "http.response.body.size" semantic conventions. It represents the size of -// the response payload body in bytes. This is the number of bytes transferred -// excluding headers and is often, but not always, present as the -// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) -// header. For requests using transport encoding, this should be the compressed -// size. -func HTTPResponseBodySize(val int) attribute.KeyValue { - return HTTPResponseBodySizeKey.Int(val) -} - -// HTTPResponseStatusCode returns an attribute KeyValue conforming to the -// "http.response.status_code" semantic conventions. It represents the [HTTP -// response status code](https://tools.ietf.org/html/rfc7231#section-6). -func HTTPResponseStatusCode(val int) attribute.KeyValue { - return HTTPResponseStatusCodeKey.Int(val) -} - -// HTTPRoute returns an attribute KeyValue conforming to the "http.route" -// semantic conventions. It represents the matched route, that is, the path -// template in the format used by the respective server framework. 
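
// Illustrative sketch (not part of the vendored diff): a server span carrying
// the stable HTTP attributes defined above, with error.type recorded per the
// note on domain-specific error identifiers. The semconv import version, the
// tracer name, and the handleGetUser/serve names are assumptions.
package example

import (
	"context"
	"strconv"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/codes"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version path
	"go.opentelemetry.io/otel/trace"
)

func handleGetUser(ctx context.Context, serve func(context.Context) (int, error)) {
	tracer := otel.Tracer("http-example")
	ctx, span := tracer.Start(ctx, "GET /users/{id}",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.HTTPRequestMethodGet,     // http.request.method; unknown methods map to _OTHER
			semconv.HTTPRoute("/users/{id}"), // low-cardinality route template
		),
	)
	defer span.End()

	status, err := serve(ctx)
	span.SetAttributes(semconv.HTTPResponseStatusCode(status)) // http.response.status_code
	if err != nil {
		if status >= 500 {
			// Domain-specific error identifier, e.g. "500", as suggested above.
			span.SetAttributes(semconv.ErrorTypeKey.String(strconv.Itoa(status)))
		} else {
			span.SetAttributes(semconv.ErrorTypeOther)
		}
		span.RecordError(err)
		span.SetStatus(codes.Error, err.Error())
	}
}
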
-func HTTPRoute(val string) attribute.KeyValue { - return HTTPRouteKey.String(val) -} - -// Attributes describing telemetry around messaging systems and messaging -// activities. -const ( - // MessagingBatchMessageCountKey is the attribute Key conforming to the - // "messaging.batch.message_count" semantic conventions. It represents the - // number of messages sent, received, or processed in the scope of the - // batching operation. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1, 2 - // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on - // spans that operate with a single message. When a messaging client - // library supports both batch and single-message API for the same - // operation, instrumentations SHOULD use `messaging.batch.message_count` - // for batching APIs and SHOULD NOT use it for single-message APIs. - MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") - - // MessagingClientIDKey is the attribute Key conforming to the - // "messaging.client_id" semantic conventions. It represents a unique - // identifier for the client that consumes or produces a message. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'client-5', 'myhost@8742@s8083jm' - MessagingClientIDKey = attribute.Key("messaging.client_id") - - // MessagingDestinationAnonymousKey is the attribute Key conforming to the - // "messaging.destination.anonymous" semantic conventions. It represents a - // boolean that is true if the message destination is anonymous (could be - // unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") - - // MessagingDestinationNameKey is the attribute Key conforming to the - // "messaging.destination.name" semantic conventions. It represents the - // message destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: Destination name SHOULD uniquely identify a specific queue, topic - // or other entity within the broker. If - // the broker doesn't have such notion, the destination name SHOULD - // uniquely identify the broker. - MessagingDestinationNameKey = attribute.Key("messaging.destination.name") - - // MessagingDestinationTemplateKey is the attribute Key conforming to the - // "messaging.destination.template" semantic conventions. It represents the - // low cardinality representation of the messaging destination name - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/customers/{customerID}' - // Note: Destination names could be constructed from templates. An example - // would be a destination name involving a user name or product id. - // Although the destination name in this case is of high cardinality, the - // underlying template is of low cardinality and can be effectively used - // for grouping and aggregation. - MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") - - // MessagingDestinationTemporaryKey is the attribute Key conforming to the - // "messaging.destination.temporary" semantic conventions. It represents a - // boolean that is true if the message destination is temporary and might - // not exist anymore after messages are processed. 
- // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") - - // MessagingDestinationPublishAnonymousKey is the attribute Key conforming - // to the "messaging.destination_publish.anonymous" semantic conventions. - // It represents a boolean that is true if the publish message destination - // is anonymous (could be unnamed or have auto-generated name). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous") - - // MessagingDestinationPublishNameKey is the attribute Key conforming to - // the "messaging.destination_publish.name" semantic conventions. It - // represents the name of the original destination the message was - // published to - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyQueue', 'MyTopic' - // Note: The name SHOULD uniquely identify a specific queue, topic, or - // other entity within the broker. If - // the broker doesn't have such notion, the original destination name - // SHOULD uniquely identify the broker. - MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name") - - // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming - // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. - // It represents the ordering key for a given message. If the attribute is - // not present, the message does not have an ordering key. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ordering_key' - MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key") - - // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the - // "messaging.kafka.consumer.group" semantic conventions. It represents the - // name of the Kafka Consumer Group that is handling the message. Only - // applies to consumers, not producers. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") - - // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to - // the "messaging.kafka.destination.partition" semantic conventions. It - // represents the partition the message is sent to. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2 - MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") - - // MessagingKafkaMessageKeyKey is the attribute Key conforming to the - // "messaging.kafka.message.key" semantic conventions. It represents the - // message keys in Kafka are used for grouping alike messages to ensure - // they're processed on the same partition. They differ from - // `messaging.message.id` in that they're not unique. If the key is `null`, - // the attribute MUST NOT be set. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to - // be supplied for the attribute. If the key has no unambiguous, canonical - // string form, don't include its value. 
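
// Illustrative sketch (not from the upstream module): applying the rule in the
// note above for messaging.kafka.message.key. The kafkaKeyAttr helper and the
// semconv import version are assumptions.
package example

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0" // assumed version path
)

func kafkaKeyAttr(attrs []attribute.KeyValue, key any) []attribute.KeyValue {
	if key == nil {
		// Null key: the attribute MUST NOT be set.
		return attrs
	}
	if s, ok := key.(string); ok {
		return append(attrs, semconv.MessagingKafkaMessageKey(s))
	}
	// Non-string key: supply its string representation (skip it instead if
	// there is no unambiguous, canonical string form).
	return append(attrs, semconv.MessagingKafkaMessageKey(fmt.Sprintf("%v", key)))
}
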
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") - - // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the - // "messaging.kafka.message.offset" semantic conventions. It represents the - // offset of a record in the corresponding Kafka partition. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") - - // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the - // "messaging.kafka.message.tombstone" semantic conventions. It represents - // a boolean that is true if the message is a tombstone. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") - - // MessagingMessageBodySizeKey is the attribute Key conforming to the - // "messaging.message.body.size" semantic conventions. It represents the - // size of the message body in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1439 - // Note: This can refer to both the compressed or uncompressed body size. - // If both sizes are known, the uncompressed - // body size should be used. - MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size") - - // MessagingMessageConversationIDKey is the attribute Key conforming to the - // "messaging.message.conversation_id" semantic conventions. It represents - // the conversation ID identifying the conversation to which the message - // belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MyConversationID' - MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") - - // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the - // "messaging.message.envelope.size" semantic conventions. It represents - // the size of the message body and metadata in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 2738 - // Note: This can refer to both the compressed or uncompressed size. If - // both sizes are known, the uncompressed - // size should be used. - MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size") - - // MessagingMessageIDKey is the attribute Key conforming to the - // "messaging.message.id" semantic conventions. It represents a value used - // by the messaging system as an identifier for the message, represented as - // a string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message.id") - - // MessagingOperationKey is the attribute Key conforming to the - // "messaging.operation" semantic conventions. It represents a string - // identifying the kind of messaging operation. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: If a custom value is used, it MUST be of low cardinality. - MessagingOperationKey = attribute.Key("messaging.operation") - - // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key - // conforming to the "messaging.rabbitmq.destination.routing_key" semantic - // conventions. It represents the rabbitMQ message routing key. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myKey' - MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") - - // MessagingRocketmqClientGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.client_group" semantic conventions. It represents - // the name of the RocketMQ producer/consumer group that is handling the - // message. The client type is identified by the SpanKind. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - - // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to - // the "messaging.rocketmq.consumption_model" semantic conventions. It - // represents the model of message consumption. This only applies to - // consumer spans. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") - - // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delay_time_level" semantic - // conventions. It represents the delay time level for delay message, which - // determines the message delay time. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3 - MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") - - // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key - // conforming to the "messaging.rocketmq.message.delivery_timestamp" - // semantic conventions. It represents the timestamp in milliseconds that - // the delay message is expected to be delivered to consumer. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1665987217045 - MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") - - // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the - // "messaging.rocketmq.message.group" semantic conventions. It represents - // the it is essential for FIFO message. Messages that belong to the same - // message group are always processed one by one within the same consumer - // group. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myMessageGroup' - MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") - - // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the - // "messaging.rocketmq.message.keys" semantic conventions. It represents - // the key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") - - // MessagingRocketmqMessageTagKey is the attribute Key conforming to the - // "messaging.rocketmq.message.tag" semantic conventions. It represents the - // secondary classifier of message besides topic. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") - - // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the - // "messaging.rocketmq.message.type" semantic conventions. It represents - // the type of message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") - - // MessagingRocketmqNamespaceKey is the attribute Key conforming to the - // "messaging.rocketmq.namespace" semantic conventions. It represents the - // namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - - // MessagingSystemKey is the attribute Key conforming to the - // "messaging.system" semantic conventions. It represents an identifier for - // the messaging system being used. See below for a list of well-known - // identifiers. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessagingSystemKey = attribute.Key("messaging.system") -) - -var ( - // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created - MessagingOperationPublish = MessagingOperationKey.String("publish") - // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios - MessagingOperationCreate = MessagingOperationKey.String("create") - // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages - MessagingOperationReceive = MessagingOperationKey.String("receive") - // One or more messages are passed to a consumer. 
This operation refers to push-based scenarios, where consumer register callbacks which get called by messaging SDKs - MessagingOperationDeliver = MessagingOperationKey.String("deliver") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Apache ActiveMQ - MessagingSystemActivemq = MessagingSystemKey.String("activemq") - // Amazon Simple Queue Service (SQS) - MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs") - // Azure Event Grid - MessagingSystemAzureEventgrid = MessagingSystemKey.String("azure_eventgrid") - // Azure Event Hubs - MessagingSystemAzureEventhubs = MessagingSystemKey.String("azure_eventhubs") - // Azure Service Bus - MessagingSystemAzureServicebus = MessagingSystemKey.String("azure_servicebus") - // Google Cloud Pub/Sub - MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub") - // Java Message Service - MessagingSystemJms = MessagingSystemKey.String("jms") - // Apache Kafka - MessagingSystemKafka = MessagingSystemKey.String("kafka") - // RabbitMQ - MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq") - // Apache RocketMQ - MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq") -) - -// MessagingBatchMessageCount returns an attribute KeyValue conforming to -// the "messaging.batch.message_count" semantic conventions. It represents the -// number of messages sent, received, or processed in the scope of the batching -// operation. -func MessagingBatchMessageCount(val int) attribute.KeyValue { - return MessagingBatchMessageCountKey.Int(val) -} - -// MessagingClientID returns an attribute KeyValue conforming to the -// "messaging.client_id" semantic conventions. It represents a unique -// identifier for the client that consumes or produces a message. -func MessagingClientID(val string) attribute.KeyValue { - return MessagingClientIDKey.String(val) -} - -// MessagingDestinationAnonymous returns an attribute KeyValue conforming to -// the "messaging.destination.anonymous" semantic conventions. It represents a -// boolean that is true if the message destination is anonymous (could be -// unnamed or have auto-generated name). -func MessagingDestinationAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationAnonymousKey.Bool(val) -} - -// MessagingDestinationName returns an attribute KeyValue conforming to the -// "messaging.destination.name" semantic conventions. It represents the message -// destination name -func MessagingDestinationName(val string) attribute.KeyValue { - return MessagingDestinationNameKey.String(val) -} - -// MessagingDestinationTemplate returns an attribute KeyValue conforming to -// the "messaging.destination.template" semantic conventions. 
It represents the -// low cardinality representation of the messaging destination name -func MessagingDestinationTemplate(val string) attribute.KeyValue { - return MessagingDestinationTemplateKey.String(val) -} - -// MessagingDestinationTemporary returns an attribute KeyValue conforming to -// the "messaging.destination.temporary" semantic conventions. It represents a -// boolean that is true if the message destination is temporary and might not -// exist anymore after messages are processed. -func MessagingDestinationTemporary(val bool) attribute.KeyValue { - return MessagingDestinationTemporaryKey.Bool(val) -} - -// MessagingDestinationPublishAnonymous returns an attribute KeyValue -// conforming to the "messaging.destination_publish.anonymous" semantic -// conventions. It represents a boolean that is true if the publish message -// destination is anonymous (could be unnamed or have auto-generated name). -func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue { - return MessagingDestinationPublishAnonymousKey.Bool(val) -} - -// MessagingDestinationPublishName returns an attribute KeyValue conforming -// to the "messaging.destination_publish.name" semantic conventions. It -// represents the name of the original destination the message was published to -func MessagingDestinationPublishName(val string) attribute.KeyValue { - return MessagingDestinationPublishNameKey.String(val) -} - -// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue -// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic -// conventions. It represents the ordering key for a given message. If the -// attribute is not present, the message does not have an ordering key. -func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue { - return MessagingGCPPubsubMessageOrderingKeyKey.String(val) -} - -// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to -// the "messaging.kafka.consumer.group" semantic conventions. It represents the -// name of the Kafka Consumer Group that is handling the message. Only applies -// to consumers, not producers. -func MessagingKafkaConsumerGroup(val string) attribute.KeyValue { - return MessagingKafkaConsumerGroupKey.String(val) -} - -// MessagingKafkaDestinationPartition returns an attribute KeyValue -// conforming to the "messaging.kafka.destination.partition" semantic -// conventions. It represents the partition the message is sent to. -func MessagingKafkaDestinationPartition(val int) attribute.KeyValue { - return MessagingKafkaDestinationPartitionKey.Int(val) -} - -// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the -// "messaging.kafka.message.key" semantic conventions. It represents the -// message keys in Kafka are used for grouping alike messages to ensure they're -// processed on the same partition. They differ from `messaging.message.id` in -// that they're not unique. If the key is `null`, the attribute MUST NOT be -// set. -func MessagingKafkaMessageKey(val string) attribute.KeyValue { - return MessagingKafkaMessageKeyKey.String(val) -} - -// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to -// the "messaging.kafka.message.offset" semantic conventions. It represents the -// offset of a record in the corresponding Kafka partition. 
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue { - return MessagingKafkaMessageOffsetKey.Int(val) -} - -// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming -// to the "messaging.kafka.message.tombstone" semantic conventions. It -// represents a boolean that is true if the message is a tombstone. -func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue { - return MessagingKafkaMessageTombstoneKey.Bool(val) -} - -// MessagingMessageBodySize returns an attribute KeyValue conforming to the -// "messaging.message.body.size" semantic conventions. It represents the size -// of the message body in bytes. -func MessagingMessageBodySize(val int) attribute.KeyValue { - return MessagingMessageBodySizeKey.Int(val) -} - -// MessagingMessageConversationID returns an attribute KeyValue conforming -// to the "messaging.message.conversation_id" semantic conventions. It -// represents the conversation ID identifying the conversation to which the -// message belongs, represented as a string. Sometimes called "Correlation ID". -func MessagingMessageConversationID(val string) attribute.KeyValue { - return MessagingMessageConversationIDKey.String(val) -} - -// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to -// the "messaging.message.envelope.size" semantic conventions. It represents -// the size of the message body and metadata in bytes. -func MessagingMessageEnvelopeSize(val int) attribute.KeyValue { - return MessagingMessageEnvelopeSizeKey.Int(val) -} - -// MessagingMessageID returns an attribute KeyValue conforming to the -// "messaging.message.id" semantic conventions. It represents a value used by -// the messaging system as an identifier for the message, represented as a -// string. -func MessagingMessageID(val string) attribute.KeyValue { - return MessagingMessageIDKey.String(val) -} - -// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue -// conforming to the "messaging.rabbitmq.destination.routing_key" semantic -// conventions. It represents the rabbitMQ message routing key. -func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { - return MessagingRabbitmqDestinationRoutingKeyKey.String(val) -} - -// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.client_group" semantic conventions. It represents -// the name of the RocketMQ producer/consumer group that is handling the -// message. The client type is identified by the SpanKind. -func MessagingRocketmqClientGroup(val string) attribute.KeyValue { - return MessagingRocketmqClientGroupKey.String(val) -} - -// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delay_time_level" semantic -// conventions. It represents the delay time level for delay message, which -// determines the message delay time. -func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue { - return MessagingRocketmqMessageDelayTimeLevelKey.Int(val) -} - -// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue -// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic -// conventions. It represents the timestamp in milliseconds that the delay -// message is expected to be delivered to consumer. 
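// Editorial illustration, not part of the vendored file above: a minimal sketch
// of how the messaging attribute helpers defined here might be attached to a
// consumer span. It assumes this package is imported as `semconv`, plus
// "context", "go.opentelemetry.io/otel" and "go.opentelemetry.io/otel/trace";
// the tracer name, topic and processing logic are placeholders.
func handleKafkaRecord(ctx context.Context, topic, key string, offset int) {
	ctx, span := otel.Tracer("example").Start(ctx, topic+" receive",
		trace.WithSpanKind(trace.SpanKindConsumer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,      // messaging.system = "kafka"
			semconv.MessagingOperationReceive, // messaging.operation = "receive"
			semconv.MessagingDestinationName(topic),
			semconv.MessagingKafkaMessageKey(key),
			semconv.MessagingKafkaMessageOffset(offset),
		),
	)
	defer span.End()
	_ = ctx // the record would be processed with the span's context here
}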
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue { - return MessagingRocketmqMessageDeliveryTimestampKey.Int(val) -} - -// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.group" semantic conventions. It represents -// the it is essential for FIFO message. Messages that belong to the same -// message group are always processed one by one within the same consumer -// group. -func MessagingRocketmqMessageGroup(val string) attribute.KeyValue { - return MessagingRocketmqMessageGroupKey.String(val) -} - -// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.keys" semantic conventions. It represents -// the key(s) of message, another way to mark message besides message id. -func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue { - return MessagingRocketmqMessageKeysKey.StringSlice(val) -} - -// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to -// the "messaging.rocketmq.message.tag" semantic conventions. It represents the -// secondary classifier of message besides topic. -func MessagingRocketmqMessageTag(val string) attribute.KeyValue { - return MessagingRocketmqMessageTagKey.String(val) -} - -// MessagingRocketmqNamespace returns an attribute KeyValue conforming to -// the "messaging.rocketmq.namespace" semantic conventions. It represents the -// namespace of RocketMQ resources, resources in different namespaces are -// individual. -func MessagingRocketmqNamespace(val string) attribute.KeyValue { - return MessagingRocketmqNamespaceKey.String(val) -} - -// These attributes may be used for any network related operation. -const ( - // NetworkCarrierIccKey is the attribute Key conforming to the - // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 - // alpha-2 2-character country code associated with the mobile carrier - // network. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'DE' - NetworkCarrierIccKey = attribute.Key("network.carrier.icc") - - // NetworkCarrierMccKey is the attribute Key conforming to the - // "network.carrier.mcc" semantic conventions. It represents the mobile - // carrier country code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '310' - NetworkCarrierMccKey = attribute.Key("network.carrier.mcc") - - // NetworkCarrierMncKey is the attribute Key conforming to the - // "network.carrier.mnc" semantic conventions. It represents the mobile - // carrier network code. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '001' - NetworkCarrierMncKey = attribute.Key("network.carrier.mnc") - - // NetworkCarrierNameKey is the attribute Key conforming to the - // "network.carrier.name" semantic conventions. It represents the name of - // the mobile carrier. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'sprint' - NetworkCarrierNameKey = attribute.Key("network.carrier.name") - - // NetworkConnectionSubtypeKey is the attribute Key conforming to the - // "network.connection.subtype" semantic conventions. It represents the - // this describes more details regarding the connection.type. It may be the - // type of cell technology connection, but it could be used for describing - // details about a wifi connection. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'LTE' - NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype") - - // NetworkConnectionTypeKey is the attribute Key conforming to the - // "network.connection.type" semantic conventions. It represents the - // internet connection type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'wifi' - NetworkConnectionTypeKey = attribute.Key("network.connection.type") - - // NetworkIoDirectionKey is the attribute Key conforming to the - // "network.io.direction" semantic conventions. It represents the network - // IO operation direction. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'transmit' - NetworkIoDirectionKey = attribute.Key("network.io.direction") - - // NetworkLocalAddressKey is the attribute Key conforming to the - // "network.local.address" semantic conventions. It represents the local - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkLocalAddressKey = attribute.Key("network.local.address") - - // NetworkLocalPortKey is the attribute Key conforming to the - // "network.local.port" semantic conventions. It represents the local port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkLocalPortKey = attribute.Key("network.local.port") - - // NetworkPeerAddressKey is the attribute Key conforming to the - // "network.peer.address" semantic conventions. It represents the peer - // address of the network connection - IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '10.1.2.80', '/tmp/my.sock' - NetworkPeerAddressKey = attribute.Key("network.peer.address") - - // NetworkPeerPortKey is the attribute Key conforming to the - // "network.peer.port" semantic conventions. It represents the peer port - // number of the network connection. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 65123 - NetworkPeerPortKey = attribute.Key("network.peer.port") - - // NetworkProtocolNameKey is the attribute Key conforming to the - // "network.protocol.name" semantic conventions. It represents the [OSI - // application layer](https://osi-model.com/application-layer/) or non-OSI - // equivalent. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'amqp', 'http', 'mqtt' - // Note: The value SHOULD be normalized to lowercase. - NetworkProtocolNameKey = attribute.Key("network.protocol.name") - - // NetworkProtocolVersionKey is the attribute Key conforming to the - // "network.protocol.version" semantic conventions. It represents the - // version of the protocol specified in `network.protocol.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '3.1.1' - // Note: `network.protocol.version` refers to the version of the protocol - // used and might be different from the protocol client's version. If the - // HTTP client has a version of `0.27.2`, but sends HTTP version `1.1`, - // this attribute should be set to `1.1`. 
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version") - - // NetworkTransportKey is the attribute Key conforming to the - // "network.transport" semantic conventions. It represents the [OSI - // transport layer](https://osi-model.com/transport-layer/) or - // [inter-process communication - // method](https://wikipedia.org/wiki/Inter-process_communication). - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'tcp', 'udp' - // Note: The value SHOULD be normalized to lowercase. - // - // Consider always setting the transport when setting a port number, since - // a port number is ambiguous without knowing the transport. For example - // different processes could be listening on TCP port 12345 and UDP port - // 12345. - NetworkTransportKey = attribute.Key("network.transport") - - // NetworkTypeKey is the attribute Key conforming to the "network.type" - // semantic conventions. It represents the [OSI network - // layer](https://osi-model.com/network-layer/) or non-OSI equivalent. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: stable - // Examples: 'ipv4', 'ipv6' - // Note: The value SHOULD be normalized to lowercase. - NetworkTypeKey = attribute.Key("network.type") -) - -var ( - // GPRS - NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs") - // EDGE - NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge") - // UMTS - NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts") - // CDMA - NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. A - NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa") - // HSPA - NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa") - // IDEN - NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden") - // EVDO Rev. 
B - NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b") - // LTE - NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte") - // EHRPD - NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap") - // GSM - NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca") -) - -var ( - // wifi - NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi") - // wired - NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired") - // cell - NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell") - // unavailable - NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable") - // unknown - NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown") -) - -var ( - // transmit - NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit") - // receive - NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive") -) - -var ( - // TCP - NetworkTransportTCP = NetworkTransportKey.String("tcp") - // UDP - NetworkTransportUDP = NetworkTransportKey.String("udp") - // Named or anonymous pipe - NetworkTransportPipe = NetworkTransportKey.String("pipe") - // Unix domain socket - NetworkTransportUnix = NetworkTransportKey.String("unix") -) - -var ( - // IPv4 - NetworkTypeIpv4 = NetworkTypeKey.String("ipv4") - // IPv6 - NetworkTypeIpv6 = NetworkTypeKey.String("ipv6") -) - -// NetworkCarrierIcc returns an attribute KeyValue conforming to the -// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1 -// alpha-2 2-character country code associated with the mobile carrier network. -func NetworkCarrierIcc(val string) attribute.KeyValue { - return NetworkCarrierIccKey.String(val) -} - -// NetworkCarrierMcc returns an attribute KeyValue conforming to the -// "network.carrier.mcc" semantic conventions. It represents the mobile carrier -// country code. -func NetworkCarrierMcc(val string) attribute.KeyValue { - return NetworkCarrierMccKey.String(val) -} - -// NetworkCarrierMnc returns an attribute KeyValue conforming to the -// "network.carrier.mnc" semantic conventions. It represents the mobile carrier -// network code. -func NetworkCarrierMnc(val string) attribute.KeyValue { - return NetworkCarrierMncKey.String(val) -} - -// NetworkCarrierName returns an attribute KeyValue conforming to the -// "network.carrier.name" semantic conventions. It represents the name of the -// mobile carrier. -func NetworkCarrierName(val string) attribute.KeyValue { - return NetworkCarrierNameKey.String(val) -} - -// NetworkLocalAddress returns an attribute KeyValue conforming to the -// "network.local.address" semantic conventions. It represents the local -// address of the network connection - IP address or Unix domain socket name. 
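// Editorial illustration, not part of the vendored file above: a sketch of
// bundling the network helpers into attributes recorded on an existing span.
// Assumes this package is imported as `semconv`, together with
// "go.opentelemetry.io/otel/attribute" and "go.opentelemetry.io/otel/trace";
// the span and connection details are supplied by the caller.
func recordTCPConnAttrs(span trace.Span, localAddr string, localPort int, peerAddr string, peerPort int) {
	attrs := []attribute.KeyValue{
		semconv.NetworkTransportTCP, // network.transport = "tcp"
		semconv.NetworkTypeIpv4,     // network.type = "ipv4"
		semconv.NetworkLocalAddress(localAddr),
		semconv.NetworkLocalPort(localPort),
		semconv.NetworkPeerAddress(peerAddr),
		semconv.NetworkPeerPort(peerPort),
	}
	span.SetAttributes(attrs...)
}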
-func NetworkLocalAddress(val string) attribute.KeyValue { - return NetworkLocalAddressKey.String(val) -} - -// NetworkLocalPort returns an attribute KeyValue conforming to the -// "network.local.port" semantic conventions. It represents the local port -// number of the network connection. -func NetworkLocalPort(val int) attribute.KeyValue { - return NetworkLocalPortKey.Int(val) -} - -// NetworkPeerAddress returns an attribute KeyValue conforming to the -// "network.peer.address" semantic conventions. It represents the peer address -// of the network connection - IP address or Unix domain socket name. -func NetworkPeerAddress(val string) attribute.KeyValue { - return NetworkPeerAddressKey.String(val) -} - -// NetworkPeerPort returns an attribute KeyValue conforming to the -// "network.peer.port" semantic conventions. It represents the peer port number -// of the network connection. -func NetworkPeerPort(val int) attribute.KeyValue { - return NetworkPeerPortKey.Int(val) -} - -// NetworkProtocolName returns an attribute KeyValue conforming to the -// "network.protocol.name" semantic conventions. It represents the [OSI -// application layer](https://osi-model.com/application-layer/) or non-OSI -// equivalent. -func NetworkProtocolName(val string) attribute.KeyValue { - return NetworkProtocolNameKey.String(val) -} - -// NetworkProtocolVersion returns an attribute KeyValue conforming to the -// "network.protocol.version" semantic conventions. It represents the version -// of the protocol specified in `network.protocol.name`. -func NetworkProtocolVersion(val string) attribute.KeyValue { - return NetworkProtocolVersionKey.String(val) -} - -// Attributes for remote procedure calls. -const ( - // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the - // "rpc.connect_rpc.error_code" semantic conventions. It represents the - // [error codes](https://connect.build/docs/protocol/#error-codes) of the - // Connect request. Error codes are always string values. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") - - // RPCGRPCStatusCodeKey is the attribute Key conforming to the - // "rpc.grpc.status_code" semantic conventions. It represents the [numeric - // status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of - // the gRPC request. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") - - // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_code" semantic conventions. It represents the - // `error.code` property of response if it is an error response. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - - // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the - // "rpc.jsonrpc.error_message" semantic conventions. It represents the - // `error.message` property of response if it is an error response. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") - - // RPCJsonrpcRequestIDKey is the attribute Key conforming to the - // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` - // property of request or response. 
Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be - // cast to string for simplicity. Use empty string in case of `null` value. - // Omit entirely if this is a notification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - - // RPCJsonrpcVersionKey is the attribute Key conforming to the - // "rpc.jsonrpc.version" semantic conventions. It represents the protocol - // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 - // doesn't specify this, the value can be omitted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - - // RPCMethodKey is the attribute Key conforming to the "rpc.method" - // semantic conventions. It represents the name of the (logical) method - // being called, must be equal to the $method part in the span name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the - // latter (e.g., method actually executing the call on the server side, RPC - // client stub method on the client side). - RPCMethodKey = attribute.Key("rpc.method") - - // RPCServiceKey is the attribute Key conforming to the "rpc.service" - // semantic conventions. It represents the full (logical) name of the - // service being called, including its package name, if applicable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing - // class. The `code.namespace` attribute may be used to store the latter - // (despite the attribute name, it may include a class name; e.g., class - // with method actually executing the call on the server side, RPC client - // stub class on the client side). - RPCServiceKey = attribute.Key("rpc.service") - - // RPCSystemKey is the attribute Key conforming to the "rpc.system" - // semantic conventions. It represents a string identifying the remoting - // system. See below for a list of well-known identifiers. 
- // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - RPCSystemKey = attribute.Key("rpc.system") -) - -var ( - // cancelled - RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") - // unknown - RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") - // invalid_argument - RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") - // deadline_exceeded - RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") - // not_found - RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") - // already_exists - RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") - // permission_denied - RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") - // resource_exhausted - RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") - // failed_precondition - RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") - // aborted - RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") - // out_of_range - RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") - // unimplemented - RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") - // internal - RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") - // unavailable - RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") - // data_loss - RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") - // unauthenticated - RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") - // Connect RPC - RPCSystemConnectRPC = 
RPCSystemKey.String("connect_rpc") -) - -// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_code" semantic conventions. It represents the -// `error.code` property of response if it is an error response. -func RPCJsonrpcErrorCode(val int) attribute.KeyValue { - return RPCJsonrpcErrorCodeKey.Int(val) -} - -// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.error_message" semantic conventions. It represents the -// `error.message` property of response if it is an error response. -func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { - return RPCJsonrpcErrorMessageKey.String(val) -} - -// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` -// property of request or response. Since protocol allows id to be int, string, -// `null` or missing (for notifications), value is expected to be cast to -// string for simplicity. Use empty string in case of `null` value. Omit -// entirely if this is a notification. -func RPCJsonrpcRequestID(val string) attribute.KeyValue { - return RPCJsonrpcRequestIDKey.String(val) -} - -// RPCJsonrpcVersion returns an attribute KeyValue conforming to the -// "rpc.jsonrpc.version" semantic conventions. It represents the protocol -// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 -// doesn't specify this, the value can be omitted. -func RPCJsonrpcVersion(val string) attribute.KeyValue { - return RPCJsonrpcVersionKey.String(val) -} - -// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" -// semantic conventions. It represents the name of the (logical) method being -// called, must be equal to the $method part in the span name. -func RPCMethod(val string) attribute.KeyValue { - return RPCMethodKey.String(val) -} - -// RPCService returns an attribute KeyValue conforming to the "rpc.service" -// semantic conventions. It represents the full (logical) name of the service -// being called, including its package name, if applicable. -func RPCService(val string) attribute.KeyValue { - return RPCServiceKey.String(val) -} - -// These attributes may be used to describe the server in a connection-based -// network interaction where there is one side that initiates the connection -// (the client is the side that initiates the connection). This covers all TCP -// network interactions since TCP is connection-based and one side initiates -// the connection (an exception is made for peer-to-peer communication over TCP -// where the "user-facing" surface of the protocol / API doesn't expose a clear -// notion of client and server). This also covers UDP network interactions -// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS. -const ( - // ServerAddressKey is the attribute Key conforming to the "server.address" - // semantic conventions. It represents the server domain name if available - // without reverse DNS lookup; otherwise, IP address or Unix domain socket - // name. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.address` SHOULD represent the server address - // behind any intermediaries, for example proxies, if it's available. 
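// Editorial illustration, not part of the vendored file above: a hedged sketch
// of a gRPC client span tagged with the RPC and server helpers. Assumes the
// same imports as the earlier sketches ("context", "go.opentelemetry.io/otel",
// "go.opentelemetry.io/otel/trace", and this package as `semconv`); the tracer
// name, service, method and address are placeholders.
func callEcho(ctx context.Context) {
	ctx, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/Echo",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.RPCSystemGRPC, // rpc.system = "grpc"
			semconv.RPCService("myservice.EchoService"),
			semconv.RPCMethod("Echo"),
			semconv.ServerAddress("example.com"),
			semconv.ServerPort(443),
		),
	)
	defer span.End()
	_ = ctx // invoke the RPC with the span's context here, then record the outcome:
	span.SetAttributes(semconv.RPCGRPCStatusCodeOk) // rpc.grpc.status_code = 0
}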
- ServerAddressKey = attribute.Key("server.address") - - // ServerPortKey is the attribute Key conforming to the "server.port" - // semantic conventions. It represents the server port number. - // - // Type: int - // RequirementLevel: Optional - // Stability: stable - // Examples: 80, 8080, 443 - // Note: When observed from the client side, and when communicating through - // an intermediary, `server.port` SHOULD represent the server port behind - // any intermediaries, for example proxies, if it's available. - ServerPortKey = attribute.Key("server.port") -) - -// ServerAddress returns an attribute KeyValue conforming to the -// "server.address" semantic conventions. It represents the server domain name -// if available without reverse DNS lookup; otherwise, IP address or Unix -// domain socket name. -func ServerAddress(val string) attribute.KeyValue { - return ServerAddressKey.String(val) -} - -// ServerPort returns an attribute KeyValue conforming to the "server.port" -// semantic conventions. It represents the server port number. -func ServerPort(val int) attribute.KeyValue { - return ServerPortKey.Int(val) -} - -// These attributes may be used to describe the sender of a network -// exchange/packet. These should be used when there is no client/server -// relationship between the two sides, or when that relationship is unknown. -// This covers low-level network interactions (e.g. packet tracing) where you -// don't know if there was a connection or which side initiated it. This also -// covers unidirectional UDP flows and peer-to-peer communication where the -// "user-facing" surface of the protocol / API doesn't expose a clear notion of -// client and server. -const ( - // SourceAddressKey is the attribute Key conforming to the "source.address" - // semantic conventions. It represents the source address - domain name if - // available without reverse DNS lookup; otherwise, IP address or Unix - // domain socket name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock' - // Note: When observed from the destination side, and when communicating - // through an intermediary, `source.address` SHOULD represent the source - // address behind any intermediaries, for example proxies, if it's - // available. - SourceAddressKey = attribute.Key("source.address") - - // SourcePortKey is the attribute Key conforming to the "source.port" - // semantic conventions. It represents the source port number - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3389, 2888 - SourcePortKey = attribute.Key("source.port") -) - -// SourceAddress returns an attribute KeyValue conforming to the -// "source.address" semantic conventions. It represents the source address - -// domain name if available without reverse DNS lookup; otherwise, IP address -// or Unix domain socket name. -func SourceAddress(val string) attribute.KeyValue { - return SourceAddressKey.String(val) -} - -// SourcePort returns an attribute KeyValue conforming to the "source.port" -// semantic conventions. It represents the source port number -func SourcePort(val int) attribute.KeyValue { - return SourcePortKey.Int(val) -} - -// Semantic convention attributes in the TLS namespace. -const ( - // TLSCipherKey is the attribute Key conforming to the "tls.cipher" - // semantic conventions. 
It represents the string indicating the - // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) - // used during the current connection. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA', - // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' - // Note: The values allowed for `tls.cipher` MUST be one of the - // `Descriptions` of the [registered TLS Cipher - // Suits](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4). - TLSCipherKey = attribute.Key("tls.cipher") - - // TLSClientCertificateKey is the attribute Key conforming to the - // "tls.client.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the client. This is - // usually mutually-exclusive of `client.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSClientCertificateKey = attribute.Key("tls.client.certificate") - - // TLSClientCertificateChainKey is the attribute Key conforming to the - // "tls.client.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the client. This is usually mutually-exclusive of - // `client.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain") - - // TLSClientHashMd5Key is the attribute Key conforming to the - // "tls.client.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5") - - // TLSClientHashSha1Key is the attribute Key conforming to the - // "tls.client.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1") - - // TLSClientHashSha256Key is the attribute Key conforming to the - // "tls.client.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the client. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256") - - // TLSClientIssuerKey is the attribute Key conforming to the - // "tls.client.issuer" semantic conventions. 
It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSClientIssuerKey = attribute.Key("tls.client.issuer") - - // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3" - // semantic conventions. It represents a hash that identifies clients based - // on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSClientJa3Key = attribute.Key("tls.client.ja3") - - // TLSClientNotAfterKey is the attribute Key conforming to the - // "tls.client.not_after" semantic conventions. It represents the date/Time - // indicating when client certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSClientNotAfterKey = attribute.Key("tls.client.not_after") - - // TLSClientNotBeforeKey is the attribute Key conforming to the - // "tls.client.not_before" semantic conventions. It represents the - // date/Time indicating when client certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSClientNotBeforeKey = attribute.Key("tls.client.not_before") - - // TLSClientServerNameKey is the attribute Key conforming to the - // "tls.client.server_name" semantic conventions. It represents the also - // called an SNI, this tells the server which hostname to which the client - // is attempting to connect to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry.io' - TLSClientServerNameKey = attribute.Key("tls.client.server_name") - - // TLSClientSubjectKey is the attribute Key conforming to the - // "tls.client.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com' - TLSClientSubjectKey = attribute.Key("tls.client.subject") - - // TLSClientSupportedCiphersKey is the attribute Key conforming to the - // "tls.client.supported_ciphers" semantic conventions. It represents the - // array of ciphers offered by the client during the client hello. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."' - TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers") - - // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic - // conventions. It represents the string indicating the curve used for the - // given cipher, when applicable - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'secp256r1' - TLSCurveKey = attribute.Key("tls.curve") - - // TLSEstablishedKey is the attribute Key conforming to the - // "tls.established" semantic conventions. 
It represents the boolean flag - // indicating if the TLS negotiation was successful and transitioned to an - // encrypted tunnel. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSEstablishedKey = attribute.Key("tls.established") - - // TLSNextProtocolKey is the attribute Key conforming to the - // "tls.next_protocol" semantic conventions. It represents the string - // indicating the protocol being tunneled. Per the values in the [IANA - // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), - // this string should be lower case. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'http/1.1' - TLSNextProtocolKey = attribute.Key("tls.next_protocol") - - // TLSProtocolNameKey is the attribute Key conforming to the - // "tls.protocol.name" semantic conventions. It represents the normalized - // lowercase protocol name parsed from original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - TLSProtocolNameKey = attribute.Key("tls.protocol.name") - - // TLSProtocolVersionKey is the attribute Key conforming to the - // "tls.protocol.version" semantic conventions. It represents the numeric - // part of the version parsed from the original string of the negotiated - // [SSL/TLS protocol - // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2', '3' - TLSProtocolVersionKey = attribute.Key("tls.protocol.version") - - // TLSResumedKey is the attribute Key conforming to the "tls.resumed" - // semantic conventions. It represents the boolean flag indicating if this - // TLS connection was resumed from an existing TLS negotiation. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Examples: True - TLSResumedKey = attribute.Key("tls.resumed") - - // TLSServerCertificateKey is the attribute Key conforming to the - // "tls.server.certificate" semantic conventions. It represents the - // pEM-encoded stand-alone certificate offered by the server. This is - // usually mutually-exclusive of `server.certificate_chain` since this - // value also exists in that list. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...' - TLSServerCertificateKey = attribute.Key("tls.server.certificate") - - // TLSServerCertificateChainKey is the attribute Key conforming to the - // "tls.server.certificate_chain" semantic conventions. It represents the - // array of PEM-encoded certificates that make up the certificate chain - // offered by the server. This is usually mutually-exclusive of - // `server.certificate` since that value should be the first certificate in - // the chain. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'MII...', 'MI...' - TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain") - - // TLSServerHashMd5Key is the attribute Key conforming to the - // "tls.server.hash.md5" semantic conventions. It represents the - // certificate fingerprint using the MD5 digest of DER-encoded version of - // certificate offered by the server. 
For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC' - TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5") - - // TLSServerHashSha1Key is the attribute Key conforming to the - // "tls.server.hash.sha1" semantic conventions. It represents the - // certificate fingerprint using the SHA1 digest of DER-encoded version of - // certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A' - TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1") - - // TLSServerHashSha256Key is the attribute Key conforming to the - // "tls.server.hash.sha256" semantic conventions. It represents the - // certificate fingerprint using the SHA256 digest of DER-encoded version - // of certificate offered by the server. For consistency with other hash - // values, this value should be formatted as an uppercase hash. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0' - TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256") - - // TLSServerIssuerKey is the attribute Key conforming to the - // "tls.server.issuer" semantic conventions. It represents the - // distinguished name of - // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) - // of the issuer of the x.509 certificate presented by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example, - // DC=com' - TLSServerIssuerKey = attribute.Key("tls.server.issuer") - - // TLSServerJa3sKey is the attribute Key conforming to the - // "tls.server.ja3s" semantic conventions. It represents a hash that - // identifies servers based on how they perform an SSL/TLS handshake. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'd4e5b18d6b55c71272893221c96ba240' - TLSServerJa3sKey = attribute.Key("tls.server.ja3s") - - // TLSServerNotAfterKey is the attribute Key conforming to the - // "tls.server.not_after" semantic conventions. It represents the date/Time - // indicating when server certificate is no longer considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021-01-01T00:00:00.000Z' - TLSServerNotAfterKey = attribute.Key("tls.server.not_after") - - // TLSServerNotBeforeKey is the attribute Key conforming to the - // "tls.server.not_before" semantic conventions. It represents the - // date/Time indicating when server certificate is first considered valid. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1970-01-01T00:00:00.000Z' - TLSServerNotBeforeKey = attribute.Key("tls.server.not_before") - - // TLSServerSubjectKey is the attribute Key conforming to the - // "tls.server.subject" semantic conventions. It represents the - // distinguished name of subject of the x.509 certificate presented by the - // server. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com' - TLSServerSubjectKey = attribute.Key("tls.server.subject") -) - -var ( - // ssl - TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl") - // tls - TLSProtocolNameTLS = TLSProtocolNameKey.String("tls") -) - -// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher" -// semantic conventions. It represents the string indicating the -// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used -// during the current connection. -func TLSCipher(val string) attribute.KeyValue { - return TLSCipherKey.String(val) -} - -// TLSClientCertificate returns an attribute KeyValue conforming to the -// "tls.client.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the client. This is usually -// mutually-exclusive of `client.certificate_chain` since this value also -// exists in that list. -func TLSClientCertificate(val string) attribute.KeyValue { - return TLSClientCertificateKey.String(val) -} - -// TLSClientCertificateChain returns an attribute KeyValue conforming to the -// "tls.client.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the client. This is usually mutually-exclusive of `client.certificate` since -// that value should be the first certificate in the chain. -func TLSClientCertificateChain(val ...string) attribute.KeyValue { - return TLSClientCertificateChainKey.StringSlice(val) -} - -// TLSClientHashMd5 returns an attribute KeyValue conforming to the -// "tls.client.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashMd5(val string) attribute.KeyValue { - return TLSClientHashMd5Key.String(val) -} - -// TLSClientHashSha1 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha1(val string) attribute.KeyValue { - return TLSClientHashSha1Key.String(val) -} - -// TLSClientHashSha256 returns an attribute KeyValue conforming to the -// "tls.client.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the client. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSClientHashSha256(val string) attribute.KeyValue { - return TLSClientHashSha256Key.String(val) -} - -// TLSClientIssuer returns an attribute KeyValue conforming to the -// "tls.client.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSClientIssuer(val string) attribute.KeyValue { - return TLSClientIssuerKey.String(val) -} - -// TLSClientJa3 returns an attribute KeyValue conforming to the -// "tls.client.ja3" semantic conventions. 
It represents a hash that identifies -// clients based on how they perform an SSL/TLS handshake. -func TLSClientJa3(val string) attribute.KeyValue { - return TLSClientJa3Key.String(val) -} - -// TLSClientNotAfter returns an attribute KeyValue conforming to the -// "tls.client.not_after" semantic conventions. It represents the date/Time -// indicating when client certificate is no longer considered valid. -func TLSClientNotAfter(val string) attribute.KeyValue { - return TLSClientNotAfterKey.String(val) -} - -// TLSClientNotBefore returns an attribute KeyValue conforming to the -// "tls.client.not_before" semantic conventions. It represents the date/Time -// indicating when client certificate is first considered valid. -func TLSClientNotBefore(val string) attribute.KeyValue { - return TLSClientNotBeforeKey.String(val) -} - -// TLSClientServerName returns an attribute KeyValue conforming to the -// "tls.client.server_name" semantic conventions. It represents the also called -// an SNI, this tells the server which hostname to which the client is -// attempting to connect to. -func TLSClientServerName(val string) attribute.KeyValue { - return TLSClientServerNameKey.String(val) -} - -// TLSClientSubject returns an attribute KeyValue conforming to the -// "tls.client.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the client. -func TLSClientSubject(val string) attribute.KeyValue { - return TLSClientSubjectKey.String(val) -} - -// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the -// "tls.client.supported_ciphers" semantic conventions. It represents the array -// of ciphers offered by the client during the client hello. -func TLSClientSupportedCiphers(val ...string) attribute.KeyValue { - return TLSClientSupportedCiphersKey.StringSlice(val) -} - -// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" -// semantic conventions. It represents the string indicating the curve used for -// the given cipher, when applicable -func TLSCurve(val string) attribute.KeyValue { - return TLSCurveKey.String(val) -} - -// TLSEstablished returns an attribute KeyValue conforming to the -// "tls.established" semantic conventions. It represents the boolean flag -// indicating if the TLS negotiation was successful and transitioned to an -// encrypted tunnel. -func TLSEstablished(val bool) attribute.KeyValue { - return TLSEstablishedKey.Bool(val) -} - -// TLSNextProtocol returns an attribute KeyValue conforming to the -// "tls.next_protocol" semantic conventions. It represents the string -// indicating the protocol being tunneled. Per the values in the [IANA -// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids), -// this string should be lower case. -func TLSNextProtocol(val string) attribute.KeyValue { - return TLSNextProtocolKey.String(val) -} - -// TLSProtocolVersion returns an attribute KeyValue conforming to the -// "tls.protocol.version" semantic conventions. It represents the numeric part -// of the version parsed from the original string of the negotiated [SSL/TLS -// protocol -// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES) -func TLSProtocolVersion(val string) attribute.KeyValue { - return TLSProtocolVersionKey.String(val) -} - -// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed" -// semantic conventions. 
It represents the boolean flag indicating if this TLS -// connection was resumed from an existing TLS negotiation. -func TLSResumed(val bool) attribute.KeyValue { - return TLSResumedKey.Bool(val) -} - -// TLSServerCertificate returns an attribute KeyValue conforming to the -// "tls.server.certificate" semantic conventions. It represents the pEM-encoded -// stand-alone certificate offered by the server. This is usually -// mutually-exclusive of `server.certificate_chain` since this value also -// exists in that list. -func TLSServerCertificate(val string) attribute.KeyValue { - return TLSServerCertificateKey.String(val) -} - -// TLSServerCertificateChain returns an attribute KeyValue conforming to the -// "tls.server.certificate_chain" semantic conventions. It represents the array -// of PEM-encoded certificates that make up the certificate chain offered by -// the server. This is usually mutually-exclusive of `server.certificate` since -// that value should be the first certificate in the chain. -func TLSServerCertificateChain(val ...string) attribute.KeyValue { - return TLSServerCertificateChainKey.StringSlice(val) -} - -// TLSServerHashMd5 returns an attribute KeyValue conforming to the -// "tls.server.hash.md5" semantic conventions. It represents the certificate -// fingerprint using the MD5 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashMd5(val string) attribute.KeyValue { - return TLSServerHashMd5Key.String(val) -} - -// TLSServerHashSha1 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha1" semantic conventions. It represents the certificate -// fingerprint using the SHA1 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha1(val string) attribute.KeyValue { - return TLSServerHashSha1Key.String(val) -} - -// TLSServerHashSha256 returns an attribute KeyValue conforming to the -// "tls.server.hash.sha256" semantic conventions. It represents the certificate -// fingerprint using the SHA256 digest of DER-encoded version of certificate -// offered by the server. For consistency with other hash values, this value -// should be formatted as an uppercase hash. -func TLSServerHashSha256(val string) attribute.KeyValue { - return TLSServerHashSha256Key.String(val) -} - -// TLSServerIssuer returns an attribute KeyValue conforming to the -// "tls.server.issuer" semantic conventions. It represents the distinguished -// name of -// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of -// the issuer of the x.509 certificate presented by the client. -func TLSServerIssuer(val string) attribute.KeyValue { - return TLSServerIssuerKey.String(val) -} - -// TLSServerJa3s returns an attribute KeyValue conforming to the -// "tls.server.ja3s" semantic conventions. It represents a hash that identifies -// servers based on how they perform an SSL/TLS handshake. -func TLSServerJa3s(val string) attribute.KeyValue { - return TLSServerJa3sKey.String(val) -} - -// TLSServerNotAfter returns an attribute KeyValue conforming to the -// "tls.server.not_after" semantic conventions. It represents the date/Time -// indicating when server certificate is no longer considered valid. 
-func TLSServerNotAfter(val string) attribute.KeyValue { - return TLSServerNotAfterKey.String(val) -} - -// TLSServerNotBefore returns an attribute KeyValue conforming to the -// "tls.server.not_before" semantic conventions. It represents the date/Time -// indicating when server certificate is first considered valid. -func TLSServerNotBefore(val string) attribute.KeyValue { - return TLSServerNotBeforeKey.String(val) -} - -// TLSServerSubject returns an attribute KeyValue conforming to the -// "tls.server.subject" semantic conventions. It represents the distinguished -// name of subject of the x.509 certificate presented by the server. -func TLSServerSubject(val string) attribute.KeyValue { - return TLSServerSubjectKey.String(val) -} - -// Attributes describing URL. -const ( - // URLFragmentKey is the attribute Key conforming to the "url.fragment" - // semantic conventions. It represents the [URI - // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'SemConv' - URLFragmentKey = attribute.Key("url.fragment") - - // URLFullKey is the attribute Key conforming to the "url.full" semantic - // conventions. It represents the absolute URL describing a network - // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv', - // '//localhost' - // Note: For network calls, URL usually has - // `scheme://host[:port][path][?query][#fragment]` format, where the - // fragment is not transmitted over HTTP, but if it is known, it SHOULD be - // included nevertheless. - // `url.full` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case username and - // password SHOULD be redacted and attribute's value SHOULD be - // `https://REDACTED:REDACTED@www.example.com/`. - // `url.full` SHOULD capture the absolute URL when it is available (or can - // be reconstructed) and SHOULD NOT be validated or modified except for - // sanitizing purposes. - URLFullKey = attribute.Key("url.full") - - // URLPathKey is the attribute Key conforming to the "url.path" semantic - // conventions. It represents the [URI - // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: '/search' - URLPathKey = attribute.Key("url.path") - - // URLQueryKey is the attribute Key conforming to the "url.query" semantic - // conventions. It represents the [URI - // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'q=OpenTelemetry' - // Note: Sensitive content provided in query string SHOULD be scrubbed when - // instrumentations can identify it. - URLQueryKey = attribute.Key("url.query") - - // URLSchemeKey is the attribute Key conforming to the "url.scheme" - // semantic conventions. It represents the [URI - // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component - // identifying the used protocol. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'https', 'ftp', 'telnet' - URLSchemeKey = attribute.Key("url.scheme") -) - -// URLFragment returns an attribute KeyValue conforming to the -// "url.fragment" semantic conventions. 
It represents the [URI -// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component -func URLFragment(val string) attribute.KeyValue { - return URLFragmentKey.String(val) -} - -// URLFull returns an attribute KeyValue conforming to the "url.full" -// semantic conventions. It represents the absolute URL describing a network -// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986) -func URLFull(val string) attribute.KeyValue { - return URLFullKey.String(val) -} - -// URLPath returns an attribute KeyValue conforming to the "url.path" -// semantic conventions. It represents the [URI -// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component -func URLPath(val string) attribute.KeyValue { - return URLPathKey.String(val) -} - -// URLQuery returns an attribute KeyValue conforming to the "url.query" -// semantic conventions. It represents the [URI -// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component -func URLQuery(val string) attribute.KeyValue { - return URLQueryKey.String(val) -} - -// URLScheme returns an attribute KeyValue conforming to the "url.scheme" -// semantic conventions. It represents the [URI -// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component -// identifying the used protocol. -func URLScheme(val string) attribute.KeyValue { - return URLSchemeKey.String(val) -} - -// Describes user-agent attributes. -const ( - // UserAgentOriginalKey is the attribute Key conforming to the - // "user_agent.original" semantic conventions. It represents the value of - // the [HTTP - // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) - // header sent by the client. - // - // Type: string - // RequirementLevel: Optional - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU - // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) - // Version/14.1.2 Mobile/15E148 Safari/604.1' - UserAgentOriginalKey = attribute.Key("user_agent.original") -) - -// UserAgentOriginal returns an attribute KeyValue conforming to the -// "user_agent.original" semantic conventions. It represents the value of the -// [HTTP -// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent) -// header sent by the client. -func UserAgentOriginal(val string) attribute.KeyValue { - return UserAgentOriginalKey.String(val) -} - -// Session is defined as the period of time encompassing all activities -// performed by the application and the actions executed by the end user. -// Consequently, a Session is represented as a collection of Logs, Events, and -// Spans emitted by the Client Application throughout the Session's duration. -// Each Session is assigned a unique identifier, which is included as an -// attribute in the Logs, Events, and Spans generated during the Session's -// lifecycle. -// When a session reaches end of life, typically due to user inactivity or -// session timeout, a new session identifier will be assigned. The previous -// session identifier may be provided by the instrumentation so that telemetry -// backends can link the two sessions. -const ( - // SessionIDKey is the attribute Key conforming to the "session.id" - // semantic conventions. It represents a unique id to identify a session. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionIDKey = attribute.Key("session.id") - - // SessionPreviousIDKey is the attribute Key conforming to the - // "session.previous_id" semantic conventions. It represents the previous - // `session.id` for this user, when known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '00112233-4455-6677-8899-aabbccddeeff' - SessionPreviousIDKey = attribute.Key("session.previous_id") -) - -// SessionID returns an attribute KeyValue conforming to the "session.id" -// semantic conventions. It represents a unique id to identify a session. -func SessionID(val string) attribute.KeyValue { - return SessionIDKey.String(val) -} - -// SessionPreviousID returns an attribute KeyValue conforming to the -// "session.previous_id" semantic conventions. It represents the previous -// `session.id` for this user, when known. -func SessionPreviousID(val string) attribute.KeyValue { - return SessionPreviousIDKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go deleted file mode 100644 index d27e8a8f8b3f6..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the v1.24.0 -// version of the OpenTelemetry semantic conventions. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go deleted file mode 100644 index 6c019aafc3ef4..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/event.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// This event represents an occurrence of a lifecycle transition on the iOS -// platform. -const ( - // IosStateKey is the attribute Key conforming to the "ios.state" semantic - // conventions. It represents the this attribute represents the state the - // application has transitioned into at the occurrence of the event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The iOS lifecycle states are defined in the [UIApplicationDelegate - // documentation](https://developer.apple.com/documentation/uikit/uiapplicationdelegate#1656902), - // and from which the `OS terminology` column values are derived. - IosStateKey = attribute.Key("ios.state") -) - -var ( - // The app has become `active`. Associated with UIKit notification `applicationDidBecomeActive` - IosStateActive = IosStateKey.String("active") - // The app is now `inactive`. Associated with UIKit notification `applicationWillResignActive` - IosStateInactive = IosStateKey.String("inactive") - // The app is now in the background. 
This value is associated with UIKit notification `applicationDidEnterBackground` - IosStateBackground = IosStateKey.String("background") - // The app is now in the foreground. This value is associated with UIKit notification `applicationWillEnterForeground` - IosStateForeground = IosStateKey.String("foreground") - // The app is about to terminate. Associated with UIKit notification `applicationWillTerminate` - IosStateTerminate = IosStateKey.String("terminate") -) - -// This event represents an occurrence of a lifecycle transition on the Android -// platform. -const ( - // AndroidStateKey is the attribute Key conforming to the "android.state" - // semantic conventions. It represents the this attribute represents the - // state the application has transitioned into at the occurrence of the - // event. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - // Note: The Android lifecycle states are defined in [Activity lifecycle - // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc), - // and from which the `OS identifiers` are derived. - AndroidStateKey = attribute.Key("android.state") -) - -var ( - // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time - AndroidStateCreated = AndroidStateKey.String("created") - // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state - AndroidStateBackground = AndroidStateKey.String("background") - // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states - AndroidStateForeground = AndroidStateKey.String("foreground") -) - -// This semantic convention defines the attributes used to represent a feature -// flag evaluation as an event. -const ( - // FeatureFlagKeyKey is the attribute Key conforming to the - // "feature_flag.key" semantic conventions. It represents the unique - // identifier of the feature flag. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'logo-color' - FeatureFlagKeyKey = attribute.Key("feature_flag.key") - - // FeatureFlagProviderNameKey is the attribute Key conforming to the - // "feature_flag.provider_name" semantic conventions. It represents the - // name of the service provider that performs the flag evaluation. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'Flag Manager' - FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name") - - // FeatureFlagVariantKey is the attribute Key conforming to the - // "feature_flag.variant" semantic conventions. It represents the sHOULD be - // a semantic identifier for a value. If one is unavailable, a stringified - // version of the value can be used. - // - // Type: string - // RequirementLevel: Recommended - // Stability: experimental - // Examples: 'red', 'true', 'on' - // Note: A semantic identifier, commonly referred to as a variant, provides - // a means - // for referring to a value without including the value itself. This can - // provide additional context for understanding the meaning behind a value. - // For example, the variant `red` maybe be used for the value `#c05543`. - // - // A stringified version of the value can be used in situations where a - // semantic identifier is unavailable. 
String representation of the value - // should be determined by the implementer. - FeatureFlagVariantKey = attribute.Key("feature_flag.variant") -) - -// FeatureFlagKey returns an attribute KeyValue conforming to the -// "feature_flag.key" semantic conventions. It represents the unique identifier -// of the feature flag. -func FeatureFlagKey(val string) attribute.KeyValue { - return FeatureFlagKeyKey.String(val) -} - -// FeatureFlagProviderName returns an attribute KeyValue conforming to the -// "feature_flag.provider_name" semantic conventions. It represents the name of -// the service provider that performs the flag evaluation. -func FeatureFlagProviderName(val string) attribute.KeyValue { - return FeatureFlagProviderNameKey.String(val) -} - -// FeatureFlagVariant returns an attribute KeyValue conforming to the -// "feature_flag.variant" semantic conventions. It represents the sHOULD be a -// semantic identifier for a value. If one is unavailable, a stringified -// version of the value can be used. -func FeatureFlagVariant(val string) attribute.KeyValue { - return FeatureFlagVariantKey.String(val) -} - -// RPC received/sent message. -const ( - // MessageCompressedSizeKey is the attribute Key conforming to the - // "message.compressed_size" semantic conventions. It represents the - // compressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - - // MessageIDKey is the attribute Key conforming to the "message.id" - // semantic conventions. It represents the mUST be calculated as two - // different counters starting from `1` one for sent messages and one for - // received message. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - - // MessageTypeKey is the attribute Key conforming to the "message.type" - // semantic conventions. It represents the whether this is a received or - // sent message. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - MessageTypeKey = attribute.Key("message.type") - - // MessageUncompressedSizeKey is the attribute Key conforming to the - // "message.uncompressed_size" semantic conventions. It represents the - // uncompressed size of the message in bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) - -// MessageCompressedSize returns an attribute KeyValue conforming to the -// "message.compressed_size" semantic conventions. It represents the compressed -// size of the message in bytes. -func MessageCompressedSize(val int) attribute.KeyValue { - return MessageCompressedSizeKey.Int(val) -} - -// MessageID returns an attribute KeyValue conforming to the "message.id" -// semantic conventions. It represents the mUST be calculated as two different -// counters starting from `1` one for sent messages and one for received -// message. -func MessageID(val int) attribute.KeyValue { - return MessageIDKey.Int(val) -} - -// MessageUncompressedSize returns an attribute KeyValue conforming to the -// "message.uncompressed_size" semantic conventions. 
It represents the -// uncompressed size of the message in bytes. -func MessageUncompressedSize(val int) attribute.KeyValue { - return MessageUncompressedSizeKey.Int(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go deleted file mode 100644 index 7235bb51d9a48..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/exception.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go deleted file mode 100644 index a6b953f625e57..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/metric.go +++ /dev/null @@ -1,1071 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -const ( - - // DBClientConnectionsUsage is the metric conforming to the - // "db.client.connections.usage" semantic conventions. It represents the number - // of connections that are currently in state described by the `state` - // attribute. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsUsageName = "db.client.connections.usage" - DBClientConnectionsUsageUnit = "{connection}" - DBClientConnectionsUsageDescription = "The number of connections that are currently in state described by the `state` attribute" - - // DBClientConnectionsIdleMax is the metric conforming to the - // "db.client.connections.idle.max" semantic conventions. It represents the - // maximum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMaxName = "db.client.connections.idle.max" - DBClientConnectionsIdleMaxUnit = "{connection}" - DBClientConnectionsIdleMaxDescription = "The maximum number of idle open connections allowed" - - // DBClientConnectionsIdleMin is the metric conforming to the - // "db.client.connections.idle.min" semantic conventions. It represents the - // minimum number of idle open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsIdleMinName = "db.client.connections.idle.min" - DBClientConnectionsIdleMinUnit = "{connection}" - DBClientConnectionsIdleMinDescription = "The minimum number of idle open connections allowed" - - // DBClientConnectionsMax is the metric conforming to the - // "db.client.connections.max" semantic conventions. It represents the maximum - // number of open connections allowed. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - DBClientConnectionsMaxName = "db.client.connections.max" - DBClientConnectionsMaxUnit = "{connection}" - DBClientConnectionsMaxDescription = "The maximum number of open connections allowed" - - // DBClientConnectionsPendingRequests is the metric conforming to the - // "db.client.connections.pending_requests" semantic conventions. It represents - // the number of pending requests for an open connection, cumulative for the - // entire pool. 
- // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests" - DBClientConnectionsPendingRequestsUnit = "{request}" - DBClientConnectionsPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool" - - // DBClientConnectionsTimeouts is the metric conforming to the - // "db.client.connections.timeouts" semantic conventions. It represents the - // number of connection timeouts that have occurred trying to obtain a - // connection from the pool. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - DBClientConnectionsTimeoutsName = "db.client.connections.timeouts" - DBClientConnectionsTimeoutsUnit = "{timeout}" - DBClientConnectionsTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool" - - // DBClientConnectionsCreateTime is the metric conforming to the - // "db.client.connections.create_time" semantic conventions. It represents the - // time it took to create a new connection. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsCreateTimeName = "db.client.connections.create_time" - DBClientConnectionsCreateTimeUnit = "ms" - DBClientConnectionsCreateTimeDescription = "The time it took to create a new connection" - - // DBClientConnectionsWaitTime is the metric conforming to the - // "db.client.connections.wait_time" semantic conventions. It represents the - // time it took to obtain an open connection from the pool. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsWaitTimeName = "db.client.connections.wait_time" - DBClientConnectionsWaitTimeUnit = "ms" - DBClientConnectionsWaitTimeDescription = "The time it took to obtain an open connection from the pool" - - // DBClientConnectionsUseTime is the metric conforming to the - // "db.client.connections.use_time" semantic conventions. It represents the - // time between borrowing a connection and returning it to the pool. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - DBClientConnectionsUseTimeName = "db.client.connections.use_time" - DBClientConnectionsUseTimeUnit = "ms" - DBClientConnectionsUseTimeDescription = "The time between borrowing a connection and returning it to the pool" - - // AspnetcoreRoutingMatchAttempts is the metric conforming to the - // "aspnetcore.routing.match_attempts" semantic conventions. It represents the - // number of requests that were attempted to be matched to an endpoint. - // Instrument: counter - // Unit: {match_attempt} - // Stability: Experimental - AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts" - AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}" - AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint." - - // AspnetcoreDiagnosticsExceptions is the metric conforming to the - // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the - // number of exceptions caught by exception handling middleware. - // Instrument: counter - // Unit: {exception} - // Stability: Experimental - AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions" - AspnetcoreDiagnosticsExceptionsUnit = "{exception}" - AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware." 
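The Name/Unit/Description constants above only describe each convention; they do not create instruments. Below is a minimal sketch of wiring one of the connection-pool definitions to the OpenTelemetry metric API, assuming the go.opentelemetry.io/otel and go.opentelemetry.io/otel/metric packages and an illustrative instrumentation-scope name ("example/db-pool"); the semconv import path is the v1.24.0 package this patch removes from the vendor tree, so a current consumer would substitute whichever semconv version replaces it.

package dbpoolmetrics

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// recordConnectionCreate records how long creating one pooled connection took,
// using the generated constants so the instrument stays aligned with the
// "db.client.connections.create_time" convention (unit "ms").
func recordConnectionCreate(ctx context.Context, elapsedMillis float64) error {
	meter := otel.Meter("example/db-pool") // scope name is illustrative

	createTime, err := meter.Float64Histogram(
		semconv.DBClientConnectionsCreateTimeName,
		metric.WithUnit(semconv.DBClientConnectionsCreateTimeUnit),
		metric.WithDescription(semconv.DBClientConnectionsCreateTimeDescription),
	)
	if err != nil {
		return err
	}
	createTime.Record(ctx, elapsedMillis)
	return nil
}

Reusing the generated constants keeps hand-written instrumentation in sync with the convention whenever the package is regenerated.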
- - // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the - // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It - // represents the number of requests that are currently active on the server - // that hold a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases" - AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}" - AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease." - - // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the - // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It - // represents the duration of rate limiting lease held by requests on the - // server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration" - AspnetcoreRateLimitingRequestLeaseDurationUnit = "s" - AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server." - - // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the - // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It - // represents the time the request spent in a queue waiting to acquire a rate - // limiting lease. - // Instrument: histogram - // Unit: s - // Stability: Experimental - AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue" - AspnetcoreRateLimitingRequestTimeInQueueUnit = "s" - AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the - // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It - // represents the number of requests that are currently queued, waiting to - // acquire a rate limiting lease. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests" - AspnetcoreRateLimitingQueuedRequestsUnit = "{request}" - AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease." - - // AspnetcoreRateLimitingRequests is the metric conforming to the - // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the - // number of requests that tried to acquire a rate limiting lease. - // Instrument: counter - // Unit: {request} - // Stability: Experimental - AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests" - AspnetcoreRateLimitingRequestsUnit = "{request}" - AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease." - - // DNSLookupDuration is the metric conforming to the "dns.lookup.duration" - // semantic conventions. It represents the measures the time taken to perform a - // DNS lookup. - // Instrument: histogram - // Unit: s - // Stability: Experimental - DNSLookupDurationName = "dns.lookup.duration" - DNSLookupDurationUnit = "s" - DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup." 
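dns.lookup.duration, defined just above as a histogram in seconds, is measured around an actual lookup. A small sketch, assuming net.DefaultResolver and an illustrative scope name ("example/dns"); only the semconv constants come from the code in this patch.

package dnsmetrics

import (
	"context"
	"net"
	"time"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// timedLookup resolves host and records the elapsed time against the
// "dns.lookup.duration" convention.
func timedLookup(ctx context.Context, host string) ([]string, error) {
	meter := otel.Meter("example/dns") // scope name is illustrative

	lookupDuration, err := meter.Float64Histogram(
		semconv.DNSLookupDurationName,
		metric.WithUnit(semconv.DNSLookupDurationUnit),
		metric.WithDescription(semconv.DNSLookupDurationDescription),
	)
	if err != nil {
		return nil, err
	}

	start := time.Now()
	addrs, lookupErr := net.DefaultResolver.LookupHost(ctx, host)
	// The convention's unit is seconds, so the elapsed time is recorded as such.
	lookupDuration.Record(ctx, time.Since(start).Seconds())
	return addrs, lookupErr
}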
- - // HTTPClientOpenConnections is the metric conforming to the - // "http.client.open_connections" semantic conventions. It represents the - // number of outbound HTTP connections that are currently active or idle on the - // client. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - HTTPClientOpenConnectionsName = "http.client.open_connections" - HTTPClientOpenConnectionsUnit = "{connection}" - HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client." - - // HTTPClientConnectionDuration is the metric conforming to the - // "http.client.connection.duration" semantic conventions. It represents the - // duration of the successfully established outbound HTTP connections. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientConnectionDurationName = "http.client.connection.duration" - HTTPClientConnectionDurationUnit = "s" - HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections." - - // HTTPClientActiveRequests is the metric conforming to the - // "http.client.active_requests" semantic conventions. It represents the number - // of active HTTP requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPClientActiveRequestsName = "http.client.active_requests" - HTTPClientActiveRequestsUnit = "{request}" - HTTPClientActiveRequestsDescription = "Number of active HTTP requests." - - // HTTPClientRequestTimeInQueue is the metric conforming to the - // "http.client.request.time_in_queue" semantic conventions. It represents the - // amount of time requests spent on a queue waiting for an available - // connection. - // Instrument: histogram - // Unit: s - // Stability: Experimental - HTTPClientRequestTimeInQueueName = "http.client.request.time_in_queue" - HTTPClientRequestTimeInQueueUnit = "s" - HTTPClientRequestTimeInQueueDescription = "The amount of time requests spent on a queue waiting for an available connection." - - // KestrelActiveConnections is the metric conforming to the - // "kestrel.active_connections" semantic conventions. It represents the number - // of connections that are currently active on the server. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - KestrelActiveConnectionsName = "kestrel.active_connections" - KestrelActiveConnectionsUnit = "{connection}" - KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // KestrelConnectionDuration is the metric conforming to the - // "kestrel.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - KestrelConnectionDurationName = "kestrel.connection.duration" - KestrelConnectionDurationUnit = "s" - KestrelConnectionDurationDescription = "The duration of connections on the server." - - // KestrelRejectedConnections is the metric conforming to the - // "kestrel.rejected_connections" semantic conventions. It represents the - // number of connections rejected by the server. - // Instrument: counter - // Unit: {connection} - // Stability: Experimental - KestrelRejectedConnectionsName = "kestrel.rejected_connections" - KestrelRejectedConnectionsUnit = "{connection}" - KestrelRejectedConnectionsDescription = "Number of connections rejected by the server." 
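http.client.open_connections above is an updowncounter, so it is adjusted up and down as connections open and close rather than recorded as samples. A sketch under the same assumptions as the earlier examples; the scope name and the returned release function are illustrative, not part of any package in this patch.

package httpconnmetrics

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// trackOpenConnection increments the open-connection count and returns a
// release function to be called when the connection is closed.
func trackOpenConnection(ctx context.Context) (func(context.Context), error) {
	meter := otel.Meter("example/http-client") // scope name is illustrative

	open, err := meter.Int64UpDownCounter(
		semconv.HTTPClientOpenConnectionsName,
		metric.WithUnit(semconv.HTTPClientOpenConnectionsUnit),
		metric.WithDescription(semconv.HTTPClientOpenConnectionsDescription),
	)
	if err != nil {
		return nil, err
	}

	open.Add(ctx, 1) // connection established
	return func(ctx context.Context) {
		open.Add(ctx, -1) // connection closed
	}, nil
}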
- - // KestrelQueuedConnections is the metric conforming to the - // "kestrel.queued_connections" semantic conventions. It represents the number - // of connections that are currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - KestrelQueuedConnectionsName = "kestrel.queued_connections" - KestrelQueuedConnectionsUnit = "{connection}" - KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start." - - // KestrelQueuedRequests is the metric conforming to the - // "kestrel.queued_requests" semantic conventions. It represents the number of - // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are - // currently queued and are waiting to start. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - KestrelQueuedRequestsName = "kestrel.queued_requests" - KestrelQueuedRequestsUnit = "{request}" - KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start." - - // KestrelUpgradedConnections is the metric conforming to the - // "kestrel.upgraded_connections" semantic conventions. It represents the - // number of connections that are currently upgraded (WebSockets). . - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - KestrelUpgradedConnectionsName = "kestrel.upgraded_connections" - KestrelUpgradedConnectionsUnit = "{connection}" - KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ." - - // KestrelTLSHandshakeDuration is the metric conforming to the - // "kestrel.tls_handshake.duration" semantic conventions. It represents the - // duration of TLS handshakes on the server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration" - KestrelTLSHandshakeDurationUnit = "s" - KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server." - - // KestrelActiveTLSHandshakes is the metric conforming to the - // "kestrel.active_tls_handshakes" semantic conventions. It represents the - // number of TLS handshakes that are currently in progress on the server. - // Instrument: updowncounter - // Unit: {handshake} - // Stability: Experimental - KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes" - KestrelActiveTLSHandshakesUnit = "{handshake}" - KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server." - - // SignalrServerConnectionDuration is the metric conforming to the - // "signalr.server.connection.duration" semantic conventions. It represents the - // duration of connections on the server. - // Instrument: histogram - // Unit: s - // Stability: Experimental - SignalrServerConnectionDurationName = "signalr.server.connection.duration" - SignalrServerConnectionDurationUnit = "s" - SignalrServerConnectionDurationDescription = "The duration of connections on the server." - - // SignalrServerActiveConnections is the metric conforming to the - // "signalr.server.active_connections" semantic conventions. It represents the - // number of connections that are currently active on the server. 
- // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - SignalrServerActiveConnectionsName = "signalr.server.active_connections" - SignalrServerActiveConnectionsUnit = "{connection}" - SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server." - - // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration" - // semantic conventions. It represents the measures the duration of the - // function's logic execution. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInvokeDurationName = "faas.invoke_duration" - FaaSInvokeDurationUnit = "s" - FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution" - - // FaaSInitDuration is the metric conforming to the "faas.init_duration" - // semantic conventions. It represents the measures the duration of the - // function's initialization, such as a cold start. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSInitDurationName = "faas.init_duration" - FaaSInitDurationUnit = "s" - FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start" - - // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic - // conventions. It represents the number of invocation cold starts. - // Instrument: counter - // Unit: {coldstart} - // Stability: Experimental - FaaSColdstartsName = "faas.coldstarts" - FaaSColdstartsUnit = "{coldstart}" - FaaSColdstartsDescription = "Number of invocation cold starts" - - // FaaSErrors is the metric conforming to the "faas.errors" semantic - // conventions. It represents the number of invocation errors. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - FaaSErrorsName = "faas.errors" - FaaSErrorsUnit = "{error}" - FaaSErrorsDescription = "Number of invocation errors" - - // FaaSInvocations is the metric conforming to the "faas.invocations" semantic - // conventions. It represents the number of successful invocations. - // Instrument: counter - // Unit: {invocation} - // Stability: Experimental - FaaSInvocationsName = "faas.invocations" - FaaSInvocationsUnit = "{invocation}" - FaaSInvocationsDescription = "Number of successful invocations" - - // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic - // conventions. It represents the number of invocation timeouts. - // Instrument: counter - // Unit: {timeout} - // Stability: Experimental - FaaSTimeoutsName = "faas.timeouts" - FaaSTimeoutsUnit = "{timeout}" - FaaSTimeoutsDescription = "Number of invocation timeouts" - - // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic - // conventions. It represents the distribution of max memory usage per - // invocation. - // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSMemUsageName = "faas.mem_usage" - FaaSMemUsageUnit = "By" - FaaSMemUsageDescription = "Distribution of max memory usage per invocation" - - // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic - // conventions. It represents the distribution of CPU usage per invocation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - FaaSCPUUsageName = "faas.cpu_usage" - FaaSCPUUsageUnit = "s" - FaaSCPUUsageDescription = "Distribution of CPU usage per invocation" - - // FaaSNetIo is the metric conforming to the "faas.net_io" semantic - // conventions. It represents the distribution of net I/O usage per invocation. 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - FaaSNetIoName = "faas.net_io" - FaaSNetIoUnit = "By" - FaaSNetIoDescription = "Distribution of net I/O usage per invocation" - - // HTTPServerRequestDuration is the metric conforming to the - // "http.server.request.duration" semantic conventions. It represents the - // duration of HTTP server requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPServerRequestDurationName = "http.server.request.duration" - HTTPServerRequestDurationUnit = "s" - HTTPServerRequestDurationDescription = "Duration of HTTP server requests." - - // HTTPServerActiveRequests is the metric conforming to the - // "http.server.active_requests" semantic conventions. It represents the number - // of active HTTP server requests. - // Instrument: updowncounter - // Unit: {request} - // Stability: Experimental - HTTPServerActiveRequestsName = "http.server.active_requests" - HTTPServerActiveRequestsUnit = "{request}" - HTTPServerActiveRequestsDescription = "Number of active HTTP server requests." - - // HTTPServerRequestBodySize is the metric conforming to the - // "http.server.request.body.size" semantic conventions. It represents the size - // of HTTP server request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerRequestBodySizeName = "http.server.request.body.size" - HTTPServerRequestBodySizeUnit = "By" - HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies." - - // HTTPServerResponseBodySize is the metric conforming to the - // "http.server.response.body.size" semantic conventions. It represents the - // size of HTTP server response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPServerResponseBodySizeName = "http.server.response.body.size" - HTTPServerResponseBodySizeUnit = "By" - HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies." - - // HTTPClientRequestDuration is the metric conforming to the - // "http.client.request.duration" semantic conventions. It represents the - // duration of HTTP client requests. - // Instrument: histogram - // Unit: s - // Stability: Stable - HTTPClientRequestDurationName = "http.client.request.duration" - HTTPClientRequestDurationUnit = "s" - HTTPClientRequestDurationDescription = "Duration of HTTP client requests." - - // HTTPClientRequestBodySize is the metric conforming to the - // "http.client.request.body.size" semantic conventions. It represents the size - // of HTTP client request bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientRequestBodySizeName = "http.client.request.body.size" - HTTPClientRequestBodySizeUnit = "By" - HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies." - - // HTTPClientResponseBodySize is the metric conforming to the - // "http.client.response.body.size" semantic conventions. It represents the - // size of HTTP client response bodies. - // Instrument: histogram - // Unit: By - // Stability: Experimental - HTTPClientResponseBodySizeName = "http.client.response.body.size" - HTTPClientResponseBodySizeUnit = "By" - HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies." - - // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic - // conventions. It represents the measure of initial memory requested. 
- // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmMemoryInitName = "jvm.memory.init" - JvmMemoryInitUnit = "By" - JvmMemoryInitDescription = "Measure of initial memory requested." - - // JvmSystemCPUUtilization is the metric conforming to the - // "jvm.system.cpu.utilization" semantic conventions. It represents the recent - // CPU utilization for the whole system as reported by the JVM. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization" - JvmSystemCPUUtilizationUnit = "1" - JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM." - - // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m" - // semantic conventions. It represents the average CPU load of the whole system - // for the last minute as reported by the JVM. - // Instrument: gauge - // Unit: {run_queue_item} - // Stability: Experimental - JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m" - JvmSystemCPULoad1mUnit = "{run_queue_item}" - JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM." - - // JvmBufferMemoryUsage is the metric conforming to the - // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of - // memory used by buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryUsageName = "jvm.buffer.memory.usage" - JvmBufferMemoryUsageUnit = "By" - JvmBufferMemoryUsageDescription = "Measure of memory used by buffers." - - // JvmBufferMemoryLimit is the metric conforming to the - // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of - // total memory capacity of buffers. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - JvmBufferMemoryLimitName = "jvm.buffer.memory.limit" - JvmBufferMemoryLimitUnit = "By" - JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers." - - // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic - // conventions. It represents the number of buffers in the pool. - // Instrument: updowncounter - // Unit: {buffer} - // Stability: Experimental - JvmBufferCountName = "jvm.buffer.count" - JvmBufferCountUnit = "{buffer}" - JvmBufferCountDescription = "Number of buffers in the pool." - - // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic - // conventions. It represents the measure of memory used. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedName = "jvm.memory.used" - JvmMemoryUsedUnit = "By" - JvmMemoryUsedDescription = "Measure of memory used." - - // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed" - // semantic conventions. It represents the measure of memory committed. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryCommittedName = "jvm.memory.committed" - JvmMemoryCommittedUnit = "By" - JvmMemoryCommittedDescription = "Measure of memory committed." - - // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic - // conventions. It represents the measure of max obtainable memory. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryLimitName = "jvm.memory.limit" - JvmMemoryLimitUnit = "By" - JvmMemoryLimitDescription = "Measure of max obtainable memory." 
- - // JvmMemoryUsedAfterLastGc is the metric conforming to the - // "jvm.memory.used_after_last_gc" semantic conventions. It represents the - // measure of memory used, as measured after the most recent garbage collection - // event on this pool. - // Instrument: updowncounter - // Unit: By - // Stability: Stable - JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc" - JvmMemoryUsedAfterLastGcUnit = "By" - JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool." - - // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic - // conventions. It represents the duration of JVM garbage collection actions. - // Instrument: histogram - // Unit: s - // Stability: Stable - JvmGcDurationName = "jvm.gc.duration" - JvmGcDurationUnit = "s" - JvmGcDurationDescription = "Duration of JVM garbage collection actions." - - // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic - // conventions. It represents the number of executing platform threads. - // Instrument: updowncounter - // Unit: {thread} - // Stability: Stable - JvmThreadCountName = "jvm.thread.count" - JvmThreadCountUnit = "{thread}" - JvmThreadCountDescription = "Number of executing platform threads." - - // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic - // conventions. It represents the number of classes loaded since JVM start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassLoadedName = "jvm.class.loaded" - JvmClassLoadedUnit = "{class}" - JvmClassLoadedDescription = "Number of classes loaded since JVM start." - - // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded" - // semantic conventions. It represents the number of classes unloaded since JVM - // start. - // Instrument: counter - // Unit: {class} - // Stability: Stable - JvmClassUnloadedName = "jvm.class.unloaded" - JvmClassUnloadedUnit = "{class}" - JvmClassUnloadedDescription = "Number of classes unloaded since JVM start." - - // JvmClassCount is the metric conforming to the "jvm.class.count" semantic - // conventions. It represents the number of classes currently loaded. - // Instrument: updowncounter - // Unit: {class} - // Stability: Stable - JvmClassCountName = "jvm.class.count" - JvmClassCountUnit = "{class}" - JvmClassCountDescription = "Number of classes currently loaded." - - // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic - // conventions. It represents the number of processors available to the Java - // virtual machine. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Stable - JvmCPUCountName = "jvm.cpu.count" - JvmCPUCountUnit = "{cpu}" - JvmCPUCountDescription = "Number of processors available to the Java virtual machine." - - // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic - // conventions. It represents the cPU time used by the process as reported by - // the JVM. - // Instrument: counter - // Unit: s - // Stability: Stable - JvmCPUTimeName = "jvm.cpu.time" - JvmCPUTimeUnit = "s" - JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM." - - // JvmCPURecentUtilization is the metric conforming to the - // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent - // CPU utilization for the process as reported by the JVM. 
- // Instrument: gauge - // Unit: 1 - // Stability: Stable - JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization" - JvmCPURecentUtilizationUnit = "1" - JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM." - - // MessagingPublishDuration is the metric conforming to the - // "messaging.publish.duration" semantic conventions. It represents the - // measures the duration of publish operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingPublishDurationName = "messaging.publish.duration" - MessagingPublishDurationUnit = "s" - MessagingPublishDurationDescription = "Measures the duration of publish operation." - - // MessagingReceiveDuration is the metric conforming to the - // "messaging.receive.duration" semantic conventions. It represents the - // measures the duration of receive operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingReceiveDurationName = "messaging.receive.duration" - MessagingReceiveDurationUnit = "s" - MessagingReceiveDurationDescription = "Measures the duration of receive operation." - - // MessagingDeliverDuration is the metric conforming to the - // "messaging.deliver.duration" semantic conventions. It represents the - // measures the duration of deliver operation. - // Instrument: histogram - // Unit: s - // Stability: Experimental - MessagingDeliverDurationName = "messaging.deliver.duration" - MessagingDeliverDurationUnit = "s" - MessagingDeliverDurationDescription = "Measures the duration of deliver operation." - - // MessagingPublishMessages is the metric conforming to the - // "messaging.publish.messages" semantic conventions. It represents the - // measures the number of published messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingPublishMessagesName = "messaging.publish.messages" - MessagingPublishMessagesUnit = "{message}" - MessagingPublishMessagesDescription = "Measures the number of published messages." - - // MessagingReceiveMessages is the metric conforming to the - // "messaging.receive.messages" semantic conventions. It represents the - // measures the number of received messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingReceiveMessagesName = "messaging.receive.messages" - MessagingReceiveMessagesUnit = "{message}" - MessagingReceiveMessagesDescription = "Measures the number of received messages." - - // MessagingDeliverMessages is the metric conforming to the - // "messaging.deliver.messages" semantic conventions. It represents the - // measures the number of delivered messages. - // Instrument: counter - // Unit: {message} - // Stability: Experimental - MessagingDeliverMessagesName = "messaging.deliver.messages" - MessagingDeliverMessagesUnit = "{message}" - MessagingDeliverMessagesDescription = "Measures the number of delivered messages." - - // RPCServerDuration is the metric conforming to the "rpc.server.duration" - // semantic conventions. It represents the measures the duration of inbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCServerDurationName = "rpc.server.duration" - RPCServerDurationUnit = "ms" - RPCServerDurationDescription = "Measures the duration of inbound RPC." - - // RPCServerRequestSize is the metric conforming to the - // "rpc.server.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). 
- // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerRequestSizeName = "rpc.server.request.size" - RPCServerRequestSizeUnit = "By" - RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCServerResponseSize is the metric conforming to the - // "rpc.server.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCServerResponseSizeName = "rpc.server.response.size" - RPCServerResponseSizeUnit = "By" - RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCServerRequestsPerRPC is the metric conforming to the - // "rpc.server.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc" - RPCServerRequestsPerRPCUnit = "{count}" - RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCServerResponsesPerRPC is the metric conforming to the - // "rpc.server.responses_per_rpc" semantic conventions. It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc" - RPCServerResponsesPerRPCUnit = "{count}" - RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // RPCClientDuration is the metric conforming to the "rpc.client.duration" - // semantic conventions. It represents the measures the duration of outbound - // RPC. - // Instrument: histogram - // Unit: ms - // Stability: Experimental - RPCClientDurationName = "rpc.client.duration" - RPCClientDurationUnit = "ms" - RPCClientDurationDescription = "Measures the duration of outbound RPC." - - // RPCClientRequestSize is the metric conforming to the - // "rpc.client.request.size" semantic conventions. It represents the measures - // the size of RPC request messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientRequestSizeName = "rpc.client.request.size" - RPCClientRequestSizeUnit = "By" - RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)." - - // RPCClientResponseSize is the metric conforming to the - // "rpc.client.response.size" semantic conventions. It represents the measures - // the size of RPC response messages (uncompressed). - // Instrument: histogram - // Unit: By - // Stability: Experimental - RPCClientResponseSizeName = "rpc.client.response.size" - RPCClientResponseSizeUnit = "By" - RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)." - - // RPCClientRequestsPerRPC is the metric conforming to the - // "rpc.client.requests_per_rpc" semantic conventions. It represents the - // measures the number of messages received per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc" - RPCClientRequestsPerRPCUnit = "{count}" - RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC." - - // RPCClientResponsesPerRPC is the metric conforming to the - // "rpc.client.responses_per_rpc" semantic conventions. 
It represents the - // measures the number of messages sent per RPC. - // Instrument: histogram - // Unit: {count} - // Stability: Experimental - RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc" - RPCClientResponsesPerRPCUnit = "{count}" - RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC." - - // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic - // conventions. It represents the seconds each logical CPU spent on each mode. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemCPUTimeName = "system.cpu.time" - SystemCPUTimeUnit = "s" - SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode" - - // SystemCPUUtilization is the metric conforming to the - // "system.cpu.utilization" semantic conventions. It represents the difference - // in system.cpu.time since the last measurement, divided by the elapsed time - // and number of logical CPUs. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - SystemCPUUtilizationName = "system.cpu.utilization" - SystemCPUUtilizationUnit = "1" - SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs" - - // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency" - // semantic conventions. It represents the reports the current frequency of the - // CPU in Hz. - // Instrument: gauge - // Unit: {Hz} - // Stability: Experimental - SystemCPUFrequencyName = "system.cpu.frequency" - SystemCPUFrequencyUnit = "{Hz}" - SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz" - - // SystemCPUPhysicalCount is the metric conforming to the - // "system.cpu.physical.count" semantic conventions. It represents the reports - // the number of actual physical processor cores on the hardware. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPUPhysicalCountName = "system.cpu.physical.count" - SystemCPUPhysicalCountUnit = "{cpu}" - SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware" - - // SystemCPULogicalCount is the metric conforming to the - // "system.cpu.logical.count" semantic conventions. It represents the reports - // the number of logical (virtual) processor cores created by the operating - // system to manage multitasking. - // Instrument: updowncounter - // Unit: {cpu} - // Stability: Experimental - SystemCPULogicalCountName = "system.cpu.logical.count" - SystemCPULogicalCountUnit = "{cpu}" - SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking" - - // SystemMemoryUsage is the metric conforming to the "system.memory.usage" - // semantic conventions. It represents the reports memory in use by state. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryUsageName = "system.memory.usage" - SystemMemoryUsageUnit = "By" - SystemMemoryUsageDescription = "Reports memory in use by state." - - // SystemMemoryLimit is the metric conforming to the "system.memory.limit" - // semantic conventions. It represents the total memory available in the - // system. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemMemoryLimitName = "system.memory.limit" - SystemMemoryLimitUnit = "By" - SystemMemoryLimitDescription = "Total memory available in the system." 
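The Name/Unit/Description constants above are plain strings intended to be passed to the OpenTelemetry metric API when creating instruments. A minimal sketch of that pattern, assuming the package being removed here (go.opentelemetry.io/otel/semconv/v1.24.0, or a later semconv release exposing the same constants) is still importable; the meter scope name "example/rpc" and the recorded value are made up for illustration:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	ctx := context.Background()

	// Obtain a Meter from the global MeterProvider; the instrumentation
	// scope name is arbitrary.
	meter := otel.Meter("example/rpc")

	// Create the rpc.server.duration histogram from the generated
	// name, unit, and description constants.
	rpcServerDuration, err := meter.Float64Histogram(
		semconv.RPCServerDurationName,
		metric.WithUnit(semconv.RPCServerDurationUnit),
		metric.WithDescription(semconv.RPCServerDurationDescription),
	)
	if err != nil {
		panic(err)
	}

	// Record an inbound RPC that took 12.5 ms (the metric's unit is "ms").
	rpcServerDuration.Record(ctx, 12.5)
}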
- - // SystemMemoryUtilization is the metric conforming to the - // "system.memory.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemMemoryUtilizationName = "system.memory.utilization" - SystemMemoryUtilizationUnit = "1" - - // SystemPagingUsage is the metric conforming to the "system.paging.usage" - // semantic conventions. It represents the unix swap or windows pagefile usage. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemPagingUsageName = "system.paging.usage" - SystemPagingUsageUnit = "By" - SystemPagingUsageDescription = "Unix swap or windows pagefile usage" - - // SystemPagingUtilization is the metric conforming to the - // "system.paging.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingUtilizationName = "system.paging.utilization" - SystemPagingUtilizationUnit = "1" - - // SystemPagingFaults is the metric conforming to the "system.paging.faults" - // semantic conventions. - // Instrument: counter - // Unit: {fault} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingFaultsName = "system.paging.faults" - SystemPagingFaultsUnit = "{fault}" - - // SystemPagingOperations is the metric conforming to the - // "system.paging.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemPagingOperationsName = "system.paging.operations" - SystemPagingOperationsUnit = "{operation}" - - // SystemDiskIo is the metric conforming to the "system.disk.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskIoName = "system.disk.io" - SystemDiskIoUnit = "By" - - // SystemDiskOperations is the metric conforming to the - // "system.disk.operations" semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskOperationsName = "system.disk.operations" - SystemDiskOperationsUnit = "{operation}" - - // SystemDiskIoTime is the metric conforming to the "system.disk.io_time" - // semantic conventions. It represents the time disk spent activated. - // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskIoTimeName = "system.disk.io_time" - SystemDiskIoTimeUnit = "s" - SystemDiskIoTimeDescription = "Time disk spent activated" - - // SystemDiskOperationTime is the metric conforming to the - // "system.disk.operation_time" semantic conventions. It represents the sum of - // the time each operation took to complete. 
- // Instrument: counter - // Unit: s - // Stability: Experimental - SystemDiskOperationTimeName = "system.disk.operation_time" - SystemDiskOperationTimeUnit = "s" - SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete" - - // SystemDiskMerged is the metric conforming to the "system.disk.merged" - // semantic conventions. - // Instrument: counter - // Unit: {operation} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemDiskMergedName = "system.disk.merged" - SystemDiskMergedUnit = "{operation}" - - // SystemFilesystemUsage is the metric conforming to the - // "system.filesystem.usage" semantic conventions. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUsageName = "system.filesystem.usage" - SystemFilesystemUsageUnit = "By" - - // SystemFilesystemUtilization is the metric conforming to the - // "system.filesystem.utilization" semantic conventions. - // Instrument: gauge - // Unit: 1 - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemFilesystemUtilizationName = "system.filesystem.utilization" - SystemFilesystemUtilizationUnit = "1" - - // SystemNetworkDropped is the metric conforming to the - // "system.network.dropped" semantic conventions. It represents the count of - // packets that are dropped or discarded even though there was no error. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - SystemNetworkDroppedName = "system.network.dropped" - SystemNetworkDroppedUnit = "{packet}" - SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error" - - // SystemNetworkPackets is the metric conforming to the - // "system.network.packets" semantic conventions. - // Instrument: counter - // Unit: {packet} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkPacketsName = "system.network.packets" - SystemNetworkPacketsUnit = "{packet}" - - // SystemNetworkErrors is the metric conforming to the "system.network.errors" - // semantic conventions. It represents the count of network errors detected. - // Instrument: counter - // Unit: {error} - // Stability: Experimental - SystemNetworkErrorsName = "system.network.errors" - SystemNetworkErrorsUnit = "{error}" - SystemNetworkErrorsDescription = "Count of network errors detected" - - // SystemNetworkIo is the metric conforming to the "system.network.io" semantic - // conventions. - // Instrument: counter - // Unit: By - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. - SystemNetworkIoName = "system.network.io" - SystemNetworkIoUnit = "By" - - // SystemNetworkConnections is the metric conforming to the - // "system.network.connections" semantic conventions. - // Instrument: updowncounter - // Unit: {connection} - // Stability: Experimental - // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository. 
- SystemNetworkConnectionsName = "system.network.connections" - SystemNetworkConnectionsUnit = "{connection}" - - // SystemProcessesCount is the metric conforming to the - // "system.processes.count" semantic conventions. It represents the total - // number of processes in each state. - // Instrument: updowncounter - // Unit: {process} - // Stability: Experimental - SystemProcessesCountName = "system.processes.count" - SystemProcessesCountUnit = "{process}" - SystemProcessesCountDescription = "Total number of processes in each state" - - // SystemProcessesCreated is the metric conforming to the - // "system.processes.created" semantic conventions. It represents the total - // number of processes created over uptime of the host. - // Instrument: counter - // Unit: {process} - // Stability: Experimental - SystemProcessesCreatedName = "system.processes.created" - SystemProcessesCreatedUnit = "{process}" - SystemProcessesCreatedDescription = "Total number of processes created over uptime of the host" - - // SystemLinuxMemoryAvailable is the metric conforming to the - // "system.linux.memory.available" semantic conventions. It represents an - // estimate of how much memory is available for starting new applications, - // without causing swapping. - // Instrument: updowncounter - // Unit: By - // Stability: Experimental - SystemLinuxMemoryAvailableName = "system.linux.memory.available" - SystemLinuxMemoryAvailableUnit = "By" - SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go deleted file mode 100644 index d66bbe9c23df9..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/resource.go +++ /dev/null @@ -1,2545 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// A cloud environment (e.g. GCP, Azure, AWS). -const ( - // CloudAccountIDKey is the attribute Key conforming to the - // "cloud.account.id" semantic conventions. It represents the cloud account - // ID the resource is assigned to. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - - // CloudAvailabilityZoneKey is the attribute Key conforming to the - // "cloud.availability_zone" semantic conventions. It represents the cloud - // regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the - // resource is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google - // Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - - // CloudPlatformKey is the attribute Key conforming to the "cloud.platform" - // semantic conventions. It represents the cloud platform in use. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") - - // CloudProviderKey is the attribute Key conforming to the "cloud.provider" - // semantic conventions. It represents the name of the cloud provider. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - CloudProviderKey = attribute.Key("cloud.provider") - - // CloudRegionKey is the attribute Key conforming to the "cloud.region" - // semantic conventions. It represents the geographical region the resource - // is running. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for - // example [Alibaba Cloud - // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS - // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), - // [Azure - // regions](https://azure.microsoft.com/global-infrastructure/geographies/), - // [Google Cloud regions](https://cloud.google.com/about/locations), or - // [Tencent Cloud - // regions](https://www.tencentcloud.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - - // CloudResourceIDKey is the attribute Key conforming to the - // "cloud.resource_id" semantic conventions. It represents the cloud - // provider-specific native identifier of the monitored cloud resource - // (e.g. an - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) - // on AWS, a [fully qualified resource - // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) - // on Azure, a [full resource - // name](https://cloud.google.com/apis/design/resource_names#full_resource_name) - // on GCP) - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function', - // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID', - // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/' - // Note: On some cloud providers, it may not be possible to determine the - // full ID at startup, - // so it may be necessary to set `cloud.resource_id` as a span attribute - // instead. - // - // The exact value to use for `cloud.resource_id` depends on the cloud - // provider. - // The following well-known definitions MUST be used if you set this - // attribute and they apply: - // - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias - // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html) - // with the resolved function version, as the same runtime instance may - // be invokable with - // multiple different aliases. - // * **GCP:** The [URI of the - // resource](https://cloud.google.com/iam/docs/full-resource-names) - // * **Azure:** The [Fully Qualified Resource - // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id) - // of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider. 
- CloudResourceIDKey = attribute.Key("cloud.resource_id") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // Red Hat OpenShift on Alibaba Cloud - CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Red Hat OpenShift on AWS (ROSA) - CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Azure Red Hat OpenShift - CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") - // Google Bare Metal Solution (BMS) - CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Red Hat OpenShift on Google Cloud - CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") - // Red Hat OpenShift on IBM Cloud - CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Heroku Platform as a Service - CloudProviderHeroku = CloudProviderKey.String("heroku") - // IBM Cloud - CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") - // Tencent Cloud - 
CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -// CloudAccountID returns an attribute KeyValue conforming to the -// "cloud.account.id" semantic conventions. It represents the cloud account ID -// the resource is assigned to. -func CloudAccountID(val string) attribute.KeyValue { - return CloudAccountIDKey.String(val) -} - -// CloudAvailabilityZone returns an attribute KeyValue conforming to the -// "cloud.availability_zone" semantic conventions. It represents the cloud -// regions often have multiple, isolated locations known as zones to increase -// availability. Availability zone represents the zone where the resource is -// running. -func CloudAvailabilityZone(val string) attribute.KeyValue { - return CloudAvailabilityZoneKey.String(val) -} - -// CloudRegion returns an attribute KeyValue conforming to the -// "cloud.region" semantic conventions. It represents the geographical region -// the resource is running. -func CloudRegion(val string) attribute.KeyValue { - return CloudRegionKey.String(val) -} - -// CloudResourceID returns an attribute KeyValue conforming to the -// "cloud.resource_id" semantic conventions. It represents the cloud -// provider-specific native identifier of the monitored cloud resource (e.g. an -// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) -// on AWS, a [fully qualified resource -// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on -// Azure, a [full resource -// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) -// on GCP) -func CloudResourceID(val string) attribute.KeyValue { - return CloudResourceIDKey.String(val) -} - -// A container instance. -const ( - // ContainerCommandKey is the attribute Key conforming to the - // "container.command" semantic conventions. It represents the command used - // to run the container (i.e. the command name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol' - // Note: If using embedded credentials or sensitive data, it is recommended - // to remove them to prevent potential leakage. - ContainerCommandKey = attribute.Key("container.command") - - // ContainerCommandArgsKey is the attribute Key conforming to the - // "container.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) run by the - // container. [2] - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol, --config, config.yaml' - ContainerCommandArgsKey = attribute.Key("container.command_args") - - // ContainerCommandLineKey is the attribute Key conforming to the - // "container.command_line" semantic conventions. It represents the full - // command run by the container as a single string representing the full - // command. [2] - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcontribcol --config config.yaml' - ContainerCommandLineKey = attribute.Key("container.command_line") - - // ContainerIDKey is the attribute Key conforming to the "container.id" - // semantic conventions. It represents the container ID. Usually a UUID, as - // for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container-identification). - // The UUID might be abbreviated. 
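The CloudAccountID, CloudAvailabilityZone, CloudRegion, and CloudResourceID helpers above return attribute.KeyValue values, which are normally attached to an SDK Resource rather than to individual spans. A minimal sketch under that assumption, reusing the example values from the attribute documentation; it also assumes go.opentelemetry.io/otel/sdk/resource is available alongside this semconv package:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Describe where the workload runs using cloud.* attributes; the
	// account ID, region, and zone are the documented example values.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudAccountID("111111111111"),
		semconv.CloudRegion("us-east-1"),
		semconv.CloudAvailabilityZone("us-east-1c"),
	)
	fmt.Println(res.Attributes())
}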
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - - // ContainerImageIDKey is the attribute Key conforming to the - // "container.image.id" semantic conventions. It represents the runtime - // specific image identifier. Usually a hash algorithm followed by a UUID. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f' - // Note: Docker defines a sha256 of the image id; `container.image.id` - // corresponds to the `Image` field from the Docker container inspect - // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect) - // endpoint. - // K8S defines a link to the container registry repository with digest - // `"imageID": "registry.azurecr.io - // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`. - // The ID is assinged by the container runtime and can vary in different - // environments. Consider using `oci.manifest.digest` if it is important to - // identify the same image in different environments/runtimes. - ContainerImageIDKey = attribute.Key("container.image.id") - - // ContainerImageNameKey is the attribute Key conforming to the - // "container.image.name" semantic conventions. It represents the name of - // the image the container was built on. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - - // ContainerImageRepoDigestsKey is the attribute Key conforming to the - // "container.image.repo_digests" semantic conventions. It represents the - // repo digests of the container image as provided by the container - // runtime. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb', - // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578' - // Note: - // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect) - // and - // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238) - // report those under the `RepoDigests` field. - ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests") - - // ContainerImageTagsKey is the attribute Key conforming to the - // "container.image.tags" semantic conventions. It represents the container - // image tags. An example can be found in [Docker Image - // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). - // Should be only the `` section of the full name for example from - // `registry.example.com/my-org/my-image:`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'v1.27.1', '3.5.7-0' - ContainerImageTagsKey = attribute.Key("container.image.tags") - - // ContainerNameKey is the attribute Key conforming to the "container.name" - // semantic conventions. It represents the container name used by container - // runtime. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - - // ContainerRuntimeKey is the attribute Key conforming to the - // "container.runtime" semantic conventions. It represents the container - // runtime managing this container. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") -) - -// ContainerCommand returns an attribute KeyValue conforming to the -// "container.command" semantic conventions. It represents the command used to -// run the container (i.e. the command name). -func ContainerCommand(val string) attribute.KeyValue { - return ContainerCommandKey.String(val) -} - -// ContainerCommandArgs returns an attribute KeyValue conforming to the -// "container.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) run by the -// container. [2] -func ContainerCommandArgs(val ...string) attribute.KeyValue { - return ContainerCommandArgsKey.StringSlice(val) -} - -// ContainerCommandLine returns an attribute KeyValue conforming to the -// "container.command_line" semantic conventions. It represents the full -// command run by the container as a single string representing the full -// command. [2] -func ContainerCommandLine(val string) attribute.KeyValue { - return ContainerCommandLineKey.String(val) -} - -// ContainerID returns an attribute KeyValue conforming to the -// "container.id" semantic conventions. It represents the container ID. Usually -// a UUID, as for example used to [identify Docker -// containers](https://docs.docker.com/engine/reference/run/#container-identification). -// The UUID might be abbreviated. -func ContainerID(val string) attribute.KeyValue { - return ContainerIDKey.String(val) -} - -// ContainerImageID returns an attribute KeyValue conforming to the -// "container.image.id" semantic conventions. It represents the runtime -// specific image identifier. Usually a hash algorithm followed by a UUID. -func ContainerImageID(val string) attribute.KeyValue { - return ContainerImageIDKey.String(val) -} - -// ContainerImageName returns an attribute KeyValue conforming to the -// "container.image.name" semantic conventions. It represents the name of the -// image the container was built on. -func ContainerImageName(val string) attribute.KeyValue { - return ContainerImageNameKey.String(val) -} - -// ContainerImageRepoDigests returns an attribute KeyValue conforming to the -// "container.image.repo_digests" semantic conventions. It represents the repo -// digests of the container image as provided by the container runtime. -func ContainerImageRepoDigests(val ...string) attribute.KeyValue { - return ContainerImageRepoDigestsKey.StringSlice(val) -} - -// ContainerImageTags returns an attribute KeyValue conforming to the -// "container.image.tags" semantic conventions. It represents the container -// image tags. An example can be found in [Docker Image -// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect). -// Should be only the `` section of the full name for example from -// `registry.example.com/my-org/my-image:`. 
-func ContainerImageTags(val ...string) attribute.KeyValue { - return ContainerImageTagsKey.StringSlice(val) -} - -// ContainerName returns an attribute KeyValue conforming to the -// "container.name" semantic conventions. It represents the container name used -// by container runtime. -func ContainerName(val string) attribute.KeyValue { - return ContainerNameKey.String(val) -} - -// ContainerRuntime returns an attribute KeyValue conforming to the -// "container.runtime" semantic conventions. It represents the container -// runtime managing this container. -func ContainerRuntime(val string) attribute.KeyValue { - return ContainerRuntimeKey.String(val) -} - -// Describes device attributes. -const ( - // DeviceIDKey is the attribute Key conforming to the "device.id" semantic - // conventions. It represents a unique identifier representing the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values - // outlined below. This value is not an advertising identifier and MUST NOT - // be used as such. On iOS (Swift or Objective-C), this value MUST be equal - // to the [vendor - // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor). - // On Android (Java or Kotlin), this value MUST be equal to the Firebase - // Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on - // best practices and exact implementation details. Caution should be taken - // when storing personal data or anything which can identify a user. GDPR - // and data protection laws may apply, ensure you do your own due - // diligence. - DeviceIDKey = attribute.Key("device.id") - - // DeviceManufacturerKey is the attribute Key conforming to the - // "device.manufacturer" semantic conventions. It represents the name of - // the device manufacturer - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") - - // DeviceModelIdentifierKey is the attribute Key conforming to the - // "device.model.identifier" semantic conventions. It represents the model - // identifier for the device - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine-readable version - // of the model identifier rather than the market or consumer-friendly name - // of the device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - - // DeviceModelNameKey is the attribute Key conforming to the - // "device.model.name" semantic conventions. It represents the marketing - // name for the device model - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human-readable version of - // the device model rather than a machine-readable alternative. 
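The container.* helpers work the same way; one way to combine them with the SDK's built-in defaults is resource.Merge. A minimal sketch, again assuming the sdk/resource package and this semconv package are importable; the container ID, name, image name, and tag are the example values from the comments above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Container attributes built from the helpers in this file; the
	// values are the examples given in the attribute documentation.
	containerRes := resource.NewSchemaless(
		semconv.ContainerID("a3bf90e006b2"),
		semconv.ContainerName("opentelemetry-autoconf"),
		semconv.ContainerImageName("gcr.io/opentelemetry/operator"),
		semconv.ContainerImageTags("v1.27.1"),
	)

	// Merge with the SDK's default resource (service.name, SDK info, ...).
	// On conflicting keys the second argument wins.
	res, err := resource.Merge(resource.Default(), containerRes)
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Attributes())
}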
- DeviceModelNameKey = attribute.Key("device.model.name") -) - -// DeviceID returns an attribute KeyValue conforming to the "device.id" -// semantic conventions. It represents a unique identifier representing the -// device -func DeviceID(val string) attribute.KeyValue { - return DeviceIDKey.String(val) -} - -// DeviceManufacturer returns an attribute KeyValue conforming to the -// "device.manufacturer" semantic conventions. It represents the name of the -// device manufacturer -func DeviceManufacturer(val string) attribute.KeyValue { - return DeviceManufacturerKey.String(val) -} - -// DeviceModelIdentifier returns an attribute KeyValue conforming to the -// "device.model.identifier" semantic conventions. It represents the model -// identifier for the device -func DeviceModelIdentifier(val string) attribute.KeyValue { - return DeviceModelIdentifierKey.String(val) -} - -// DeviceModelName returns an attribute KeyValue conforming to the -// "device.model.name" semantic conventions. It represents the marketing name -// for the device model -func DeviceModelName(val string) attribute.KeyValue { - return DeviceModelNameKey.String(val) -} - -// A host is defined as a computing instance. For example, physical servers, -// virtual machines, switches or disk array. -const ( - // HostArchKey is the attribute Key conforming to the "host.arch" semantic - // conventions. It represents the CPU architecture the host system is - // running on. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - HostArchKey = attribute.Key("host.arch") - - // HostCPUCacheL2SizeKey is the attribute Key conforming to the - // "host.cpu.cache.l2.size" semantic conventions. It represents the amount - // of level 2 memory cache available to the processor (in Bytes). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 12288000 - HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size") - - // HostCPUFamilyKey is the attribute Key conforming to the - // "host.cpu.family" semantic conventions. It represents the family or - // generation of the CPU. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', 'PA-RISC 1.1e' - HostCPUFamilyKey = attribute.Key("host.cpu.family") - - // HostCPUModelIDKey is the attribute Key conforming to the - // "host.cpu.model.id" semantic conventions. It represents the model - // identifier. It provides more granular information about the CPU, - // distinguishing it from other CPUs within the same family. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '6', '9000/778/B180L' - HostCPUModelIDKey = attribute.Key("host.cpu.model.id") - - // HostCPUModelNameKey is the attribute Key conforming to the - // "host.cpu.model.name" semantic conventions. It represents the model - // designation of the processor. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz' - HostCPUModelNameKey = attribute.Key("host.cpu.model.name") - - // HostCPUSteppingKey is the attribute Key conforming to the - // "host.cpu.stepping" semantic conventions. It represents the stepping or - // core revisions. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1 - HostCPUSteppingKey = attribute.Key("host.cpu.stepping") - - // HostCPUVendorIDKey is the attribute Key conforming to the - // "host.cpu.vendor.id" semantic conventions. It represents the processor - // manufacturer identifier. A maximum 12-character string. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'GenuineIntel' - // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor - // ID string in EBX, EDX and ECX registers. Writing these to memory in this - // order results in a 12-character string. - HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id") - - // HostIDKey is the attribute Key conforming to the "host.id" semantic - // conventions. It represents the unique host ID. For Cloud, this must be - // the instance_id assigned by the cloud provider. For non-containerized - // systems, this should be the `machine-id`. See the table below for the - // sources to use to determine the `machine-id` based on operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'fdbf79e8af94cb7f9e8df36789187052' - HostIDKey = attribute.Key("host.id") - - // HostImageIDKey is the attribute Key conforming to the "host.image.id" - // semantic conventions. It represents the vM image ID or host OS image ID. - // For Cloud, this value is from the provider. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - - // HostImageNameKey is the attribute Key conforming to the - // "host.image.name" semantic conventions. It represents the name of the VM - // image or OS install the host was instantiated from. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - - // HostImageVersionKey is the attribute Key conforming to the - // "host.image.version" semantic conventions. It represents the version - // string of the VM image or host OS as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") - - // HostIPKey is the attribute Key conforming to the "host.ip" semantic - // conventions. It represents the available IP addresses of the host, - // excluding loopback interfaces. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e' - // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6 - // addresses MUST be specified in the [RFC - // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format. - HostIPKey = attribute.Key("host.ip") - - // HostMacKey is the attribute Key conforming to the "host.mac" semantic - // conventions. It represents the available MAC addresses of the host, - // excluding loopback interfaces. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F' - // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal - // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf): - // as hyphen-separated octets in uppercase hexadecimal form from most to - // least significant. - HostMacKey = attribute.Key("host.mac") - - // HostNameKey is the attribute Key conforming to the "host.name" semantic - // conventions. It represents the name of the host. On Unix systems, it may - // contain what the hostname command returns, or the fully qualified - // hostname, or another name specified by the user. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - - // HostTypeKey is the attribute Key conforming to the "host.type" semantic - // conventions. It represents the type of host. For Cloud, this must be the - // machine type. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// HostCPUCacheL2Size returns an attribute KeyValue conforming to the -// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of -// level 2 memory cache available to the processor (in Bytes). -func HostCPUCacheL2Size(val int) attribute.KeyValue { - return HostCPUCacheL2SizeKey.Int(val) -} - -// HostCPUFamily returns an attribute KeyValue conforming to the -// "host.cpu.family" semantic conventions. It represents the family or -// generation of the CPU. -func HostCPUFamily(val string) attribute.KeyValue { - return HostCPUFamilyKey.String(val) -} - -// HostCPUModelID returns an attribute KeyValue conforming to the -// "host.cpu.model.id" semantic conventions. It represents the model -// identifier. It provides more granular information about the CPU, -// distinguishing it from other CPUs within the same family. -func HostCPUModelID(val string) attribute.KeyValue { - return HostCPUModelIDKey.String(val) -} - -// HostCPUModelName returns an attribute KeyValue conforming to the -// "host.cpu.model.name" semantic conventions. It represents the model -// designation of the processor. -func HostCPUModelName(val string) attribute.KeyValue { - return HostCPUModelNameKey.String(val) -} - -// HostCPUStepping returns an attribute KeyValue conforming to the -// "host.cpu.stepping" semantic conventions. It represents the stepping or core -// revisions. -func HostCPUStepping(val int) attribute.KeyValue { - return HostCPUSteppingKey.Int(val) -} - -// HostCPUVendorID returns an attribute KeyValue conforming to the -// "host.cpu.vendor.id" semantic conventions. It represents the processor -// manufacturer identifier. A maximum 12-character string. 
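A resource built from the host.* attributes above is usually wired into a provider so every exported span carries it. A minimal sketch assuming the sdk/trace and sdk/resource packages are available; the hostname and machine type reuse the documented example values:

package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// Describe the host with host.* attributes; the hostname and machine
	// type reuse the documented example values.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.HostName("opentelemetry-test"),
		semconv.HostArchAMD64,
		semconv.HostType("n1-standard-1"),
	)

	// Attach the resource to a TracerProvider so every span it produces
	// carries these host attributes.
	tp := sdktrace.NewTracerProvider(sdktrace.WithResource(res))
	otel.SetTracerProvider(tp)
}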
-func HostCPUVendorID(val string) attribute.KeyValue { - return HostCPUVendorIDKey.String(val) -} - -// HostID returns an attribute KeyValue conforming to the "host.id" semantic -// conventions. It represents the unique host ID. For Cloud, this must be the -// instance_id assigned by the cloud provider. For non-containerized systems, -// this should be the `machine-id`. See the table below for the sources to use -// to determine the `machine-id` based on operating system. -func HostID(val string) attribute.KeyValue { - return HostIDKey.String(val) -} - -// HostImageID returns an attribute KeyValue conforming to the -// "host.image.id" semantic conventions. It represents the vM image ID or host -// OS image ID. For Cloud, this value is from the provider. -func HostImageID(val string) attribute.KeyValue { - return HostImageIDKey.String(val) -} - -// HostImageName returns an attribute KeyValue conforming to the -// "host.image.name" semantic conventions. It represents the name of the VM -// image or OS install the host was instantiated from. -func HostImageName(val string) attribute.KeyValue { - return HostImageNameKey.String(val) -} - -// HostImageVersion returns an attribute KeyValue conforming to the -// "host.image.version" semantic conventions. It represents the version string -// of the VM image or host OS as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func HostImageVersion(val string) attribute.KeyValue { - return HostImageVersionKey.String(val) -} - -// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic -// conventions. It represents the available IP addresses of the host, excluding -// loopback interfaces. -func HostIP(val ...string) attribute.KeyValue { - return HostIPKey.StringSlice(val) -} - -// HostMac returns an attribute KeyValue conforming to the "host.mac" -// semantic conventions. It represents the available MAC addresses of the host, -// excluding loopback interfaces. -func HostMac(val ...string) attribute.KeyValue { - return HostMacKey.StringSlice(val) -} - -// HostName returns an attribute KeyValue conforming to the "host.name" -// semantic conventions. It represents the name of the host. On Unix systems, -// it may contain what the hostname command returns, or the fully qualified -// hostname, or another name specified by the user. -func HostName(val string) attribute.KeyValue { - return HostNameKey.String(val) -} - -// HostType returns an attribute KeyValue conforming to the "host.type" -// semantic conventions. It represents the type of host. For Cloud, this must -// be the machine type. -func HostType(val string) attribute.KeyValue { - return HostTypeKey.String(val) -} - -// Kubernetes resource attributes. -const ( - // K8SClusterNameKey is the attribute Key conforming to the - // "k8s.cluster.name" semantic conventions. It represents the name of the - // cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") - - // K8SClusterUIDKey is the attribute Key conforming to the - // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for - // the cluster, set to the UID of the `kube-system` namespace. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d' - // Note: K8S doesn't have support for obtaining a cluster ID. 
If this is - // ever - // added, we will recommend collecting the `k8s.cluster.uid` through the - // official APIs. In the meantime, we are able to use the `uid` of the - // `kube-system` namespace as a proxy for cluster ID. Read on for the - // rationale. - // - // Every object created in a K8S cluster is assigned a distinct UID. The - // `kube-system` namespace is used by Kubernetes itself and will exist - // for the lifetime of the cluster. Using the `uid` of the `kube-system` - // namespace is a reasonable proxy for the K8S ClusterID as it will only - // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are - // UUIDs as standardized by - // [ISO/IEC 9834-8 and ITU-T - // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html). - // Which states: - // - // > If generated according to one of the mechanisms defined in Rec. - // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be - // different from all other UUIDs generated before 3603 A.D., or is - // extremely likely to be different (depending on the mechanism chosen). - // - // Therefore, UIDs between clusters should be extremely unlikely to - // conflict. - K8SClusterUIDKey = attribute.Key("k8s.cluster.uid") - - // K8SContainerNameKey is the attribute Key conforming to the - // "k8s.container.name" semantic conventions. It represents the name of the - // Container from Pod specification, must be unique within a Pod. Container - // runtime usually uses different globally unique name (`container.name`). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - - // K8SContainerRestartCountKey is the attribute Key conforming to the - // "k8s.container.restart_count" semantic conventions. It represents the - // number of times the container was restarted. This attribute can be used - // to identify a particular container (running or stopped) within a - // container spec. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") - - // K8SCronJobNameKey is the attribute Key conforming to the - // "k8s.cronjob.name" semantic conventions. It represents the name of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") - - // K8SCronJobUIDKey is the attribute Key conforming to the - // "k8s.cronjob.uid" semantic conventions. It represents the UID of the - // CronJob. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - - // K8SDaemonSetNameKey is the attribute Key conforming to the - // "k8s.daemonset.name" semantic conventions. It represents the name of the - // DaemonSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") - - // K8SDaemonSetUIDKey is the attribute Key conforming to the - // "k8s.daemonset.uid" semantic conventions. It represents the UID of the - // DaemonSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - - // K8SDeploymentNameKey is the attribute Key conforming to the - // "k8s.deployment.name" semantic conventions. It represents the name of - // the Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") - - // K8SDeploymentUIDKey is the attribute Key conforming to the - // "k8s.deployment.uid" semantic conventions. It represents the UID of the - // Deployment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - - // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" - // semantic conventions. It represents the name of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") - - // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" - // semantic conventions. It represents the UID of the Job. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - - // K8SNamespaceNameKey is the attribute Key conforming to the - // "k8s.namespace.name" semantic conventions. It represents the name of the - // namespace that the pod is running in. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") - - // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name" - // semantic conventions. It represents the name of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - - // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" - // semantic conventions. It represents the UID of the Node. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") - - // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" - // semantic conventions. It represents the name of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") - - // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" - // semantic conventions. It represents the UID of the Pod. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - - // K8SReplicaSetNameKey is the attribute Key conforming to the - // "k8s.replicaset.name" semantic conventions. It represents the name of - // the ReplicaSet. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") - - // K8SReplicaSetUIDKey is the attribute Key conforming to the - // "k8s.replicaset.uid" semantic conventions. It represents the UID of the - // ReplicaSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - - // K8SStatefulSetNameKey is the attribute Key conforming to the - // "k8s.statefulset.name" semantic conventions. It represents the name of - // the StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") - - // K8SStatefulSetUIDKey is the attribute Key conforming to the - // "k8s.statefulset.uid" semantic conventions. It represents the UID of the - // StatefulSet. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") -) - -// K8SClusterName returns an attribute KeyValue conforming to the -// "k8s.cluster.name" semantic conventions. It represents the name of the -// cluster. -func K8SClusterName(val string) attribute.KeyValue { - return K8SClusterNameKey.String(val) -} - -// K8SClusterUID returns an attribute KeyValue conforming to the -// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the -// cluster, set to the UID of the `kube-system` namespace. -func K8SClusterUID(val string) attribute.KeyValue { - return K8SClusterUIDKey.String(val) -} - -// K8SContainerName returns an attribute KeyValue conforming to the -// "k8s.container.name" semantic conventions. It represents the name of the -// Container from Pod specification, must be unique within a Pod. Container -// runtime usually uses different globally unique name (`container.name`). -func K8SContainerName(val string) attribute.KeyValue { - return K8SContainerNameKey.String(val) -} - -// K8SContainerRestartCount returns an attribute KeyValue conforming to the -// "k8s.container.restart_count" semantic conventions. It represents the number -// of times the container was restarted. This attribute can be used to identify -// a particular container (running or stopped) within a container spec. -func K8SContainerRestartCount(val int) attribute.KeyValue { - return K8SContainerRestartCountKey.Int(val) -} - -// K8SCronJobName returns an attribute KeyValue conforming to the -// "k8s.cronjob.name" semantic conventions. It represents the name of the -// CronJob. -func K8SCronJobName(val string) attribute.KeyValue { - return K8SCronJobNameKey.String(val) -} - -// K8SCronJobUID returns an attribute KeyValue conforming to the -// "k8s.cronjob.uid" semantic conventions. It represents the UID of the -// CronJob. -func K8SCronJobUID(val string) attribute.KeyValue { - return K8SCronJobUIDKey.String(val) -} - -// K8SDaemonSetName returns an attribute KeyValue conforming to the -// "k8s.daemonset.name" semantic conventions. It represents the name of the -// DaemonSet. -func K8SDaemonSetName(val string) attribute.KeyValue { - return K8SDaemonSetNameKey.String(val) -} - -// K8SDaemonSetUID returns an attribute KeyValue conforming to the -// "k8s.daemonset.uid" semantic conventions. It represents the UID of the -// DaemonSet. 
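In a cluster, the k8s.* attributes defined here are typically populated from values the workload already knows about itself. The sketch below assumes (this is not part of the package) that the namespace, pod, and deployment names were injected as the environment variables K8S_NAMESPACE, K8S_POD_NAME, and K8S_DEPLOYMENT via the Kubernetes downward API:

package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

func main() {
	// The environment variable names are an assumption: they only exist
	// if the pod spec injects them via the Kubernetes downward API.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SNamespaceName(os.Getenv("K8S_NAMESPACE")),
		semconv.K8SPodName(os.Getenv("K8S_POD_NAME")),
		semconv.K8SDeploymentName(os.Getenv("K8S_DEPLOYMENT")),
	)
	fmt.Println(res.Attributes())
}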
-func K8SDaemonSetUID(val string) attribute.KeyValue { - return K8SDaemonSetUIDKey.String(val) -} - -// K8SDeploymentName returns an attribute KeyValue conforming to the -// "k8s.deployment.name" semantic conventions. It represents the name of the -// Deployment. -func K8SDeploymentName(val string) attribute.KeyValue { - return K8SDeploymentNameKey.String(val) -} - -// K8SDeploymentUID returns an attribute KeyValue conforming to the -// "k8s.deployment.uid" semantic conventions. It represents the UID of the -// Deployment. -func K8SDeploymentUID(val string) attribute.KeyValue { - return K8SDeploymentUIDKey.String(val) -} - -// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name" -// semantic conventions. It represents the name of the Job. -func K8SJobName(val string) attribute.KeyValue { - return K8SJobNameKey.String(val) -} - -// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid" -// semantic conventions. It represents the UID of the Job. -func K8SJobUID(val string) attribute.KeyValue { - return K8SJobUIDKey.String(val) -} - -// K8SNamespaceName returns an attribute KeyValue conforming to the -// "k8s.namespace.name" semantic conventions. It represents the name of the -// namespace that the pod is running in. -func K8SNamespaceName(val string) attribute.KeyValue { - return K8SNamespaceNameKey.String(val) -} - -// K8SNodeName returns an attribute KeyValue conforming to the -// "k8s.node.name" semantic conventions. It represents the name of the Node. -func K8SNodeName(val string) attribute.KeyValue { - return K8SNodeNameKey.String(val) -} - -// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid" -// semantic conventions. It represents the UID of the Node. -func K8SNodeUID(val string) attribute.KeyValue { - return K8SNodeUIDKey.String(val) -} - -// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name" -// semantic conventions. It represents the name of the Pod. -func K8SPodName(val string) attribute.KeyValue { - return K8SPodNameKey.String(val) -} - -// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid" -// semantic conventions. It represents the UID of the Pod. -func K8SPodUID(val string) attribute.KeyValue { - return K8SPodUIDKey.String(val) -} - -// K8SReplicaSetName returns an attribute KeyValue conforming to the -// "k8s.replicaset.name" semantic conventions. It represents the name of the -// ReplicaSet. -func K8SReplicaSetName(val string) attribute.KeyValue { - return K8SReplicaSetNameKey.String(val) -} - -// K8SReplicaSetUID returns an attribute KeyValue conforming to the -// "k8s.replicaset.uid" semantic conventions. It represents the UID of the -// ReplicaSet. -func K8SReplicaSetUID(val string) attribute.KeyValue { - return K8SReplicaSetUIDKey.String(val) -} - -// K8SStatefulSetName returns an attribute KeyValue conforming to the -// "k8s.statefulset.name" semantic conventions. It represents the name of the -// StatefulSet. -func K8SStatefulSetName(val string) attribute.KeyValue { - return K8SStatefulSetNameKey.String(val) -} - -// K8SStatefulSetUID returns an attribute KeyValue conforming to the -// "k8s.statefulset.uid" semantic conventions. It represents the UID of the -// StatefulSet. -func K8SStatefulSetUID(val string) attribute.KeyValue { - return K8SStatefulSetUIDKey.String(val) -} - -// An OCI image manifest. -const ( - // OciManifestDigestKey is the attribute Key conforming to the - // "oci.manifest.digest" semantic conventions. 
It represents the digest of - // the OCI image manifest. For container images specifically is the digest - // by which the container image is known. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4' - // Note: Follows [OCI Image Manifest - // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md), - // and specifically the [Digest - // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests). - // An example can be found in [Example Image - // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest). - OciManifestDigestKey = attribute.Key("oci.manifest.digest") -) - -// OciManifestDigest returns an attribute KeyValue conforming to the -// "oci.manifest.digest" semantic conventions. It represents the digest of the -// OCI image manifest. For container images specifically is the digest by which -// the container image is known. -func OciManifestDigest(val string) attribute.KeyValue { - return OciManifestDigestKey.String(val) -} - -// The operating system (OS) on which the process represented by this resource -// is running. -const ( - // OSBuildIDKey is the attribute Key conforming to the "os.build_id" - // semantic conventions. It represents the unique identifier for a - // particular build or compilation of the operating system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'TQ3C.230805.001.B2', '20E247', '22621' - OSBuildIDKey = attribute.Key("os.build_id") - - // OSDescriptionKey is the attribute Key conforming to the "os.description" - // semantic conventions. It represents the human readable (not intended to - // be parsed) OS version information, like e.g. reported by `ver` or - // `lsb_release -a` commands. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 - // LTS' - OSDescriptionKey = attribute.Key("os.description") - - // OSNameKey is the attribute Key conforming to the "os.name" semantic - // conventions. It represents the human readable operating system name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - - // OSTypeKey is the attribute Key conforming to the "os.type" semantic - // conventions. It represents the operating system type. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OSTypeKey = attribute.Key("os.type") - - // OSVersionKey is the attribute Key conforming to the "os.version" - // semantic conventions. It represents the version string of the operating - // system as defined in [Version - // Attributes](/docs/resource/README.md#version-attributes). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// OSBuildID returns an attribute KeyValue conforming to the "os.build_id" -// semantic conventions. It represents the unique identifier for a particular -// build or compilation of the operating system. -func OSBuildID(val string) attribute.KeyValue { - return OSBuildIDKey.String(val) -} - -// OSDescription returns an attribute KeyValue conforming to the -// "os.description" semantic conventions. It represents the human readable (not -// intended to be parsed) OS version information, like e.g. reported by `ver` -// or `lsb_release -a` commands. -func OSDescription(val string) attribute.KeyValue { - return OSDescriptionKey.String(val) -} - -// OSName returns an attribute KeyValue conforming to the "os.name" semantic -// conventions. It represents the human readable operating system name. -func OSName(val string) attribute.KeyValue { - return OSNameKey.String(val) -} - -// OSVersion returns an attribute KeyValue conforming to the "os.version" -// semantic conventions. It represents the version string of the operating -// system as defined in [Version -// Attributes](/docs/resource/README.md#version-attributes). -func OSVersion(val string) attribute.KeyValue { - return OSVersionKey.String(val) -} - -// An operating system process. -const ( - // ProcessCommandKey is the attribute Key conforming to the - // "process.command" semantic conventions. It represents the command used - // to launch the process (i.e. the command name). On Linux based systems, - // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can - // be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - - // ProcessCommandArgsKey is the attribute Key conforming to the - // "process.command_args" semantic conventions. It represents the all the - // command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited - // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, - // this would be the full argv vector passed to `main`. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - - // ProcessCommandLineKey is the attribute Key conforming to the - // "process.command_line" semantic conventions. 
It represents the full - // command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. - // Do not set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - - // ProcessExecutableNameKey is the attribute Key conforming to the - // "process.executable.name" semantic conventions. It represents the name - // of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name - // of `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - - // ProcessExecutablePathKey is the attribute Key conforming to the - // "process.executable.path" semantic conventions. It represents the full - // path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - - // ProcessOwnerKey is the attribute Key conforming to the "process.owner" - // semantic conventions. It represents the username of the user that owns - // the process. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") - - // ProcessParentPIDKey is the attribute Key conforming to the - // "process.parent_pid" semantic conventions. It represents the parent - // Process identifier (PPID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 111 - ProcessParentPIDKey = attribute.Key("process.parent_pid") - - // ProcessPIDKey is the attribute Key conforming to the "process.pid" - // semantic conventions. It represents the process identifier (PID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - - // ProcessRuntimeDescriptionKey is the attribute Key conforming to the - // "process.runtime.description" semantic conventions. It represents an - // additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") - - // ProcessRuntimeNameKey is the attribute Key conforming to the - // "process.runtime.name" semantic conventions. It represents the name of - // the runtime of this process. For compiled native binaries, this SHOULD - // be the name of the compiler. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - - // ProcessRuntimeVersionKey is the attribute Key conforming to the - // "process.runtime.version" semantic conventions. It represents the - // version of the runtime of this process, as returned by the runtime - // without modification. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") -) - -// ProcessCommand returns an attribute KeyValue conforming to the -// "process.command" semantic conventions. It represents the command used to -// launch the process (i.e. the command name). On Linux based systems, can be -// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to -// the first parameter extracted from `GetCommandLineW`. -func ProcessCommand(val string) attribute.KeyValue { - return ProcessCommandKey.String(val) -} - -// ProcessCommandArgs returns an attribute KeyValue conforming to the -// "process.command_args" semantic conventions. It represents the all the -// command arguments (including the command/executable itself) as received by -// the process. On Linux-based systems (and some other Unixoid systems -// supporting procfs), can be set according to the list of null-delimited -// strings extracted from `proc/[pid]/cmdline`. For libc-based executables, -// this would be the full argv vector passed to `main`. -func ProcessCommandArgs(val ...string) attribute.KeyValue { - return ProcessCommandArgsKey.StringSlice(val) -} - -// ProcessCommandLine returns an attribute KeyValue conforming to the -// "process.command_line" semantic conventions. It represents the full command -// used to launch the process as a single string representing the full command. -// On Windows, can be set to the result of `GetCommandLineW`. Do not set this -// if you have to assemble it just for monitoring; use `process.command_args` -// instead. -func ProcessCommandLine(val string) attribute.KeyValue { - return ProcessCommandLineKey.String(val) -} - -// ProcessExecutableName returns an attribute KeyValue conforming to the -// "process.executable.name" semantic conventions. It represents the name of -// the process executable. On Linux based systems, can be set to the `Name` in -// `proc/[pid]/status`. On Windows, can be set to the base name of -// `GetProcessImageFileNameW`. -func ProcessExecutableName(val string) attribute.KeyValue { - return ProcessExecutableNameKey.String(val) -} - -// ProcessExecutablePath returns an attribute KeyValue conforming to the -// "process.executable.path" semantic conventions. It represents the full path -// to the process executable. On Linux based systems, can be set to the target -// of `proc/[pid]/exe`. On Windows, can be set to the result of -// `GetProcessImageFileNameW`. -func ProcessExecutablePath(val string) attribute.KeyValue { - return ProcessExecutablePathKey.String(val) -} - -// ProcessOwner returns an attribute KeyValue conforming to the -// "process.owner" semantic conventions. It represents the username of the user -// that owns the process. -func ProcessOwner(val string) attribute.KeyValue { - return ProcessOwnerKey.String(val) -} - -// ProcessParentPID returns an attribute KeyValue conforming to the -// "process.parent_pid" semantic conventions. It represents the parent Process -// identifier (PPID). 
-func ProcessParentPID(val int) attribute.KeyValue { - return ProcessParentPIDKey.Int(val) -} - -// ProcessPID returns an attribute KeyValue conforming to the "process.pid" -// semantic conventions. It represents the process identifier (PID). -func ProcessPID(val int) attribute.KeyValue { - return ProcessPIDKey.Int(val) -} - -// ProcessRuntimeDescription returns an attribute KeyValue conforming to the -// "process.runtime.description" semantic conventions. It represents an -// additional description about the runtime of the process, for example a -// specific vendor customization of the runtime environment. -func ProcessRuntimeDescription(val string) attribute.KeyValue { - return ProcessRuntimeDescriptionKey.String(val) -} - -// ProcessRuntimeName returns an attribute KeyValue conforming to the -// "process.runtime.name" semantic conventions. It represents the name of the -// runtime of this process. For compiled native binaries, this SHOULD be the -// name of the compiler. -func ProcessRuntimeName(val string) attribute.KeyValue { - return ProcessRuntimeNameKey.String(val) -} - -// ProcessRuntimeVersion returns an attribute KeyValue conforming to the -// "process.runtime.version" semantic conventions. It represents the version of -// the runtime of this process, as returned by the runtime without -// modification. -func ProcessRuntimeVersion(val string) attribute.KeyValue { - return ProcessRuntimeVersionKey.String(val) -} - -// The Android platform on which the Android application is running. -const ( - // AndroidOSAPILevelKey is the attribute Key conforming to the - // "android.os.api_level" semantic conventions. It represents the uniquely - // identifies the framework API revision offered by a version - // (`os.version`) of the android operating system. More information can be - // found - // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '33', '32' - AndroidOSAPILevelKey = attribute.Key("android.os.api_level") -) - -// AndroidOSAPILevel returns an attribute KeyValue conforming to the -// "android.os.api_level" semantic conventions. It represents the uniquely -// identifies the framework API revision offered by a version (`os.version`) of -// the android operating system. More information can be found -// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels). -func AndroidOSAPILevel(val string) attribute.KeyValue { - return AndroidOSAPILevelKey.String(val) -} - -// The web browser in which the application represented by the resource is -// running. The `browser.*` attributes MUST be used only for resources that -// represent applications running in a web browser (regardless of whether -// running on a mobile or desktop device). -const ( - // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" - // semantic conventions. It represents the array of brand name and version - // separated by a space - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.brands`). - BrowserBrandsKey = attribute.Key("browser.brands") - - // BrowserLanguageKey is the attribute Key conforming to the - // "browser.language" semantic conventions. 
It represents the preferred - // language of the user using the browser - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'en', 'en-US', 'fr', 'fr-FR' - // Note: This value is intended to be taken from the Navigator API - // `navigator.language`. - BrowserLanguageKey = attribute.Key("browser.language") - - // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" - // semantic conventions. It represents a boolean that is true if the - // browser is running on a mobile device - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.mobile`). If unavailable, this attribute - // SHOULD be left unset. - BrowserMobileKey = attribute.Key("browser.mobile") - - // BrowserPlatformKey is the attribute Key conforming to the - // "browser.platform" semantic conventions. It represents the platform on - // which the browser is running - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (`navigator.userAgentData.platform`). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute - // SHOULD be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client - // Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in - // the [`os.type` and `os.name` attributes](./os.md). However, for - // consistency, the values in the `browser.platform` attribute should - // capture the exact value that the user agent provides. - BrowserPlatformKey = attribute.Key("browser.platform") -) - -// BrowserBrands returns an attribute KeyValue conforming to the -// "browser.brands" semantic conventions. It represents the array of brand name -// and version separated by a space -func BrowserBrands(val ...string) attribute.KeyValue { - return BrowserBrandsKey.StringSlice(val) -} - -// BrowserLanguage returns an attribute KeyValue conforming to the -// "browser.language" semantic conventions. It represents the preferred -// language of the user using the browser -func BrowserLanguage(val string) attribute.KeyValue { - return BrowserLanguageKey.String(val) -} - -// BrowserMobile returns an attribute KeyValue conforming to the -// "browser.mobile" semantic conventions. It represents a boolean that is true -// if the browser is running on a mobile device -func BrowserMobile(val bool) attribute.KeyValue { - return BrowserMobileKey.Bool(val) -} - -// BrowserPlatform returns an attribute KeyValue conforming to the -// "browser.platform" semantic conventions. It represents the platform on which -// the browser is running -func BrowserPlatform(val string) attribute.KeyValue { - return BrowserPlatformKey.String(val) -} - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // AWSECSClusterARNKey is the attribute Key conforming to the - // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an - // [ECS - // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - - // AWSECSContainerARNKey is the attribute Key conforming to the - // "aws.ecs.container.arn" semantic conventions. It represents the Amazon - // Resource Name (ARN) of an [ECS container - // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - - // AWSECSLaunchtypeKey is the attribute Key conforming to the - // "aws.ecs.launchtype" semantic conventions. It represents the [launch - // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) - // for an ECS task. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - - // AWSECSTaskARNKey is the attribute Key conforming to the - // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an - // [ECS task - // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - - // AWSECSTaskFamilyKey is the attribute Key conforming to the - // "aws.ecs.task.family" semantic conventions. It represents the task - // definition family this task definition is a member of. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - - // AWSECSTaskRevisionKey is the attribute Key conforming to the - // "aws.ecs.task.revision" semantic conventions. It represents the revision - // for this task definition. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// AWSECSClusterARN returns an attribute KeyValue conforming to the -// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS -// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). -func AWSECSClusterARN(val string) attribute.KeyValue { - return AWSECSClusterARNKey.String(val) -} - -// AWSECSContainerARN returns an attribute KeyValue conforming to the -// "aws.ecs.container.arn" semantic conventions. It represents the Amazon -// Resource Name (ARN) of an [ECS container -// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). -func AWSECSContainerARN(val string) attribute.KeyValue { - return AWSECSContainerARNKey.String(val) -} - -// AWSECSTaskARN returns an attribute KeyValue conforming to the -// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS -// task -// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
-func AWSECSTaskARN(val string) attribute.KeyValue { - return AWSECSTaskARNKey.String(val) -} - -// AWSECSTaskFamily returns an attribute KeyValue conforming to the -// "aws.ecs.task.family" semantic conventions. It represents the task -// definition family this task definition is a member of. -func AWSECSTaskFamily(val string) attribute.KeyValue { - return AWSECSTaskFamilyKey.String(val) -} - -// AWSECSTaskRevision returns an attribute KeyValue conforming to the -// "aws.ecs.task.revision" semantic conventions. It represents the revision for -// this task definition. -func AWSECSTaskRevision(val string) attribute.KeyValue { - return AWSECSTaskRevisionKey.String(val) -} - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // AWSEKSClusterARNKey is the attribute Key conforming to the - // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an - // EKS cluster. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// AWSEKSClusterARN returns an attribute KeyValue conforming to the -// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS -// cluster. -func AWSEKSClusterARN(val string) attribute.KeyValue { - return AWSEKSClusterARNKey.String(val) -} - -// Resources specific to Amazon Web Services. -const ( - // AWSLogGroupARNsKey is the attribute Key conforming to the - // "aws.log.group.arns" semantic conventions. It represents the Amazon - // Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - - // AWSLogGroupNamesKey is the attribute Key conforming to the - // "aws.log.group.names" semantic conventions. It represents the name(s) of - // the AWS log group(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like - // multi-container applications, where a single application has sidecar - // containers, and each write to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - - // AWSLogStreamARNsKey is the attribute Key conforming to the - // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of - // the AWS log stream(s). - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format). - // One log group can contain several log streams, so these ARNs necessarily - // identify both a log group and a log stream. - AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") - - // AWSLogStreamNamesKey is the attribute Key conforming to the - // "aws.log.stream.names" semantic conventions. 
It represents the name(s) - // of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") -) - -// AWSLogGroupARNs returns an attribute KeyValue conforming to the -// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource -// Name(s) (ARN) of the AWS log group(s). -func AWSLogGroupARNs(val ...string) attribute.KeyValue { - return AWSLogGroupARNsKey.StringSlice(val) -} - -// AWSLogGroupNames returns an attribute KeyValue conforming to the -// "aws.log.group.names" semantic conventions. It represents the name(s) of the -// AWS log group(s) an application is writing to. -func AWSLogGroupNames(val ...string) attribute.KeyValue { - return AWSLogGroupNamesKey.StringSlice(val) -} - -// AWSLogStreamARNs returns an attribute KeyValue conforming to the -// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the -// AWS log stream(s). -func AWSLogStreamARNs(val ...string) attribute.KeyValue { - return AWSLogStreamARNsKey.StringSlice(val) -} - -// AWSLogStreamNames returns an attribute KeyValue conforming to the -// "aws.log.stream.names" semantic conventions. It represents the name(s) of -// the AWS log stream(s) an application is writing to. -func AWSLogStreamNames(val ...string) attribute.KeyValue { - return AWSLogStreamNamesKey.StringSlice(val) -} - -// Resource used by Google Cloud Run. -const ( - // GCPCloudRunJobExecutionKey is the attribute Key conforming to the - // "gcp.cloud_run.job.execution" semantic conventions. It represents the - // name of the Cloud Run - // [execution](https://cloud.google.com/run/docs/managing/job-executions) - // being run for the Job, as set by the - // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'job-name-xxxx', 'sample-job-mdw84' - GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution") - - // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the - // "gcp.cloud_run.job.task_index" semantic conventions. It represents the - // index for a task within an execution as provided by the - // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) - // environment variable. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 0, 1 - GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index") -) - -// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.execution" semantic conventions. It represents the name -// of the Cloud Run -// [execution](https://cloud.google.com/run/docs/managing/job-executions) being -// run for the Job, as set by the -// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobExecution(val string) attribute.KeyValue { - return GCPCloudRunJobExecutionKey.String(val) -} - -// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the -// "gcp.cloud_run.job.task_index" semantic conventions. 
It represents the index -// for a task within an execution as provided by the -// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars) -// environment variable. -func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue { - return GCPCloudRunJobTaskIndexKey.Int(val) -} - -// Resources used by Google Compute Engine (GCE). -const ( - // GCPGceInstanceHostnameKey is the attribute Key conforming to the - // "gcp.gce.instance.hostname" semantic conventions. It represents the - // hostname of a GCE instance. This is the full value of the default or - // [custom - // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-host1234.example.com', - // 'sample-vm.us-west1-b.c.my-project.internal' - GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname") - - // GCPGceInstanceNameKey is the attribute Key conforming to the - // "gcp.gce.instance.name" semantic conventions. It represents the instance - // name of a GCE instance. This is the value provided by `host.name`, the - // visible name of the instance in the Cloud Console UI, and the prefix for - // the default hostname of the instance as defined by the [default internal - // DNS - // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'instance-1', 'my-vm-name' - GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name") -) - -// GCPGceInstanceHostname returns an attribute KeyValue conforming to the -// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname -// of a GCE instance. This is the full value of the default or [custom -// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm). -func GCPGceInstanceHostname(val string) attribute.KeyValue { - return GCPGceInstanceHostnameKey.String(val) -} - -// GCPGceInstanceName returns an attribute KeyValue conforming to the -// "gcp.gce.instance.name" semantic conventions. It represents the instance -// name of a GCE instance. This is the value provided by `host.name`, the -// visible name of the instance in the Cloud Console UI, and the prefix for the -// default hostname of the instance as defined by the [default internal DNS -// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names). -func GCPGceInstanceName(val string) attribute.KeyValue { - return GCPGceInstanceNameKey.String(val) -} - -// Heroku dyno metadata -const ( - // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" - // semantic conventions. It represents the unique identifier for the - // application - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' - HerokuAppIDKey = attribute.Key("heroku.app.id") - - // HerokuReleaseCommitKey is the attribute Key conforming to the - // "heroku.release.commit" semantic conventions. It represents the commit - // hash for the current release - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' - HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") - - // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the - // "heroku.release.creation_timestamp" semantic conventions. 
It represents - // the time and date the release was created - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2022-10-23T18:00:42Z' - HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") -) - -// HerokuAppID returns an attribute KeyValue conforming to the -// "heroku.app.id" semantic conventions. It represents the unique identifier -// for the application -func HerokuAppID(val string) attribute.KeyValue { - return HerokuAppIDKey.String(val) -} - -// HerokuReleaseCommit returns an attribute KeyValue conforming to the -// "heroku.release.commit" semantic conventions. It represents the commit hash -// for the current release -func HerokuReleaseCommit(val string) attribute.KeyValue { - return HerokuReleaseCommitKey.String(val) -} - -// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming -// to the "heroku.release.creation_timestamp" semantic conventions. It -// represents the time and date the release was created -func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { - return HerokuReleaseCreationTimestampKey.String(val) -} - -// The software deployment. -const ( - // DeploymentEnvironmentKey is the attribute Key conforming to the - // "deployment.environment" semantic conventions. It represents the name of - // the [deployment - // environment](https://wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'staging', 'production' - // Note: `deployment.environment` does not affect the uniqueness - // constraints defined through - // the `service.namespace`, `service.name` and `service.instance.id` - // resource attributes. - // This implies that resources carrying the following attribute - // combinations MUST be - // considered to be identifying the same service: - // - // * `service.name=frontend`, `deployment.environment=production` - // * `service.name=frontend`, `deployment.environment=staging`. - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// DeploymentEnvironment returns an attribute KeyValue conforming to the -// "deployment.environment" semantic conventions. It represents the name of the -// [deployment environment](https://wikipedia.org/wiki/Deployment_environment) -// (aka deployment tier). -func DeploymentEnvironment(val string) attribute.KeyValue { - return DeploymentEnvironmentKey.String(val) -} - -// A serverless instance. -const ( - // FaaSInstanceKey is the attribute Key conforming to the "faas.instance" - // semantic conventions. It represents the execution environment ID as a - // string, that will be potentially reused for other invocations to the - // same function/function version. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - - // FaaSMaxMemoryKey is the attribute Key conforming to the - // "faas.max_memory" semantic conventions. It represents the amount of - // memory available to the serverless function converted to Bytes. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 134217728 - // Note: It's recommended to set this attribute since e.g. too little - // memory can easily stop a Java AWS Lambda function from working - // correctly. 
On AWS Lambda, the environment variable - // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must - // be multiplied by 1,048,576). - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") - - // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic - // conventions. It represents the name of the single function that this - // runtime instance executes. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the - // FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes) - // span attributes). - // - // For some cloud providers, the above definition is ambiguous. The - // following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud - // providers/products: - // - // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `cloud.resource_id` attribute). - FaaSNameKey = attribute.Key("faas.name") - - // FaaSVersionKey is the attribute Key conforming to the "faas.version" - // semantic conventions. It represents the immutable version of the - // function being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - // - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run (Services):** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") -) - -// FaaSInstance returns an attribute KeyValue conforming to the -// "faas.instance" semantic conventions. It represents the execution -// environment ID as a string, that will be potentially reused for other -// invocations to the same function/function version. -func FaaSInstance(val string) attribute.KeyValue { - return FaaSInstanceKey.String(val) -} - -// FaaSMaxMemory returns an attribute KeyValue conforming to the -// "faas.max_memory" semantic conventions. It represents the amount of memory -// available to the serverless function converted to Bytes. -func FaaSMaxMemory(val int) attribute.KeyValue { - return FaaSMaxMemoryKey.Int(val) -} - -// FaaSName returns an attribute KeyValue conforming to the "faas.name" -// semantic conventions. It represents the name of the single function that -// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue { - return FaaSNameKey.String(val) -} - -// FaaSVersion returns an attribute KeyValue conforming to the -// "faas.version" semantic conventions. It represents the immutable version of -// the function being executed. -func FaaSVersion(val string) attribute.KeyValue { - return FaaSVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceNameKey is the attribute Key conforming to the "service.name" - // semantic conventions. It represents the logical name of the service. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled - // services. If the value was not specified, SDKs MUST fallback to - // `unknown_service:` concatenated with - // [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, - // the value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - - // ServiceVersionKey is the attribute Key conforming to the - // "service.version" semantic conventions. It represents the version string - // of the service API or implementation. The format is not defined by these - // conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2.0.0', 'a01dbef8a' - ServiceVersionKey = attribute.Key("service.version") -) - -// ServiceName returns an attribute KeyValue conforming to the -// "service.name" semantic conventions. It represents the logical name of the -// service. -func ServiceName(val string) attribute.KeyValue { - return ServiceNameKey.String(val) -} - -// ServiceVersion returns an attribute KeyValue conforming to the -// "service.version" semantic conventions. It represents the version string of -// the service API or implementation. The format is not defined by these -// conventions. -func ServiceVersion(val string) attribute.KeyValue { - return ServiceVersionKey.String(val) -} - -// A service instance. -const ( - // ServiceInstanceIDKey is the attribute Key conforming to the - // "service.instance.id" semantic conventions. It represents the string ID - // of the service instance. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'my-k8s-pod-deployment-1', - // '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be - // globally unique). The ID helps to distinguish instances of the same - // service that exist at the same time (e.g. instances of a horizontally - // scaled service). It is preferable for the ID to be persistent and stay - // the same for the lifetime of the service instance, however it is - // acceptable that the ID is ephemeral and changes during important - // lifetime events for the service (e.g. service restarts). If the service - // has no inherent unique ID that can be used as the value of this - // attribute it is recommended to generate a random Version 1 or Version 4 - // RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - - // ServiceNamespaceKey is the attribute Key conforming to the - // "service.namespace" semantic conventions. 
It represents a namespace for - // `service.name`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group - // of services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` - // is expected to be unique for all services that have no explicit - // namespace defined (so the empty/unspecified namespace is simply one more - // valid namespace). Zero-length namespace string is assumed equal to - // unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") -) - -// ServiceInstanceID returns an attribute KeyValue conforming to the -// "service.instance.id" semantic conventions. It represents the string ID of -// the service instance. -func ServiceInstanceID(val string) attribute.KeyValue { - return ServiceInstanceIDKey.String(val) -} - -// ServiceNamespace returns an attribute KeyValue conforming to the -// "service.namespace" semantic conventions. It represents a namespace for -// `service.name`. -func ServiceNamespace(val string) attribute.KeyValue { - return ServiceNamespaceKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetrySDKLanguageKey is the attribute Key conforming to the - // "telemetry.sdk.language" semantic conventions. It represents the - // language of the telemetry SDK. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - - // TelemetrySDKNameKey is the attribute Key conforming to the - // "telemetry.sdk.name" semantic conventions. It represents the name of the - // telemetry SDK as defined above. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'opentelemetry' - // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute - // to `opentelemetry`. - // If another SDK, like a fork or a vendor-provided implementation, is - // used, this SDK MUST set the - // `telemetry.sdk.name` attribute to the fully-qualified class or module - // name of this SDK's main entry point - // or another suitable identifier depending on the language. - // The identifier `opentelemetry` is reserved and MUST NOT be used in this - // case. - // All custom identifiers SHOULD be stable across different versions of an - // implementation. - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - - // TelemetrySDKVersionKey is the attribute Key conforming to the - // "telemetry.sdk.version" semantic conventions. It represents the version - // string of the telemetry SDK. 
- // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // rust - TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") -) - -// TelemetrySDKName returns an attribute KeyValue conforming to the -// "telemetry.sdk.name" semantic conventions. It represents the name of the -// telemetry SDK as defined above. -func TelemetrySDKName(val string) attribute.KeyValue { - return TelemetrySDKNameKey.String(val) -} - -// TelemetrySDKVersion returns an attribute KeyValue conforming to the -// "telemetry.sdk.version" semantic conventions. It represents the version -// string of the telemetry SDK. -func TelemetrySDKVersion(val string) attribute.KeyValue { - return TelemetrySDKVersionKey.String(val) -} - -// The telemetry SDK used to capture data recorded by the instrumentation -// libraries. -const ( - // TelemetryDistroNameKey is the attribute Key conforming to the - // "telemetry.distro.name" semantic conventions. It represents the name of - // the auto instrumentation agent or distribution, if used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'parts-unlimited-java' - // Note: Official auto instrumentation agents and distributions SHOULD set - // the `telemetry.distro.name` attribute to - // a string starting with `opentelemetry-`, e.g. - // `opentelemetry-java-instrumentation`. - TelemetryDistroNameKey = attribute.Key("telemetry.distro.name") - - // TelemetryDistroVersionKey is the attribute Key conforming to the - // "telemetry.distro.version" semantic conventions. It represents the - // version string of the auto instrumentation agent or distribution, if - // used. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.2.3' - TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version") -) - -// TelemetryDistroName returns an attribute KeyValue conforming to the -// "telemetry.distro.name" semantic conventions. It represents the name of the -// auto instrumentation agent or distribution, if used. -func TelemetryDistroName(val string) attribute.KeyValue { - return TelemetryDistroNameKey.String(val) -} - -// TelemetryDistroVersion returns an attribute KeyValue conforming to the -// "telemetry.distro.version" semantic conventions. It represents the version -// string of the auto instrumentation agent or distribution, if used. 
-func TelemetryDistroVersion(val string) attribute.KeyValue { - return TelemetryDistroVersionKey.String(val) -} - -// Resource describing the packaged software running the application code. Web -// engines are typically executed using process.runtime. -const ( - // WebEngineDescriptionKey is the attribute Key conforming to the - // "webengine.description" semantic conventions. It represents the - // additional description of the web engine (e.g. detailed version and - // edition information). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - - // 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") - - // WebEngineNameKey is the attribute Key conforming to the "webengine.name" - // semantic conventions. It represents the name of the web engine. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - - // WebEngineVersionKey is the attribute Key conforming to the - // "webengine.version" semantic conventions. It represents the version of - // the web engine. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") -) - -// WebEngineDescription returns an attribute KeyValue conforming to the -// "webengine.description" semantic conventions. It represents the additional -// description of the web engine (e.g. detailed version and edition -// information). -func WebEngineDescription(val string) attribute.KeyValue { - return WebEngineDescriptionKey.String(val) -} - -// WebEngineName returns an attribute KeyValue conforming to the -// "webengine.name" semantic conventions. It represents the name of the web -// engine. -func WebEngineName(val string) attribute.KeyValue { - return WebEngineNameKey.String(val) -} - -// WebEngineVersion returns an attribute KeyValue conforming to the -// "webengine.version" semantic conventions. It represents the version of the -// web engine. -func WebEngineVersion(val string) attribute.KeyValue { - return WebEngineVersionKey.String(val) -} - -// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's -// concepts. -const ( - // OTelScopeNameKey is the attribute Key conforming to the - // "otel.scope.name" semantic conventions. It represents the name of the - // instrumentation scope - (`InstrumentationScope.Name` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'io.opentelemetry.contrib.mongodb' - OTelScopeNameKey = attribute.Key("otel.scope.name") - - // OTelScopeVersionKey is the attribute Key conforming to the - // "otel.scope.version" semantic conventions. It represents the version of - // the instrumentation scope - (`InstrumentationScope.Version` in OTLP). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0.0' - OTelScopeVersionKey = attribute.Key("otel.scope.version") -) - -// OTelScopeName returns an attribute KeyValue conforming to the -// "otel.scope.name" semantic conventions. It represents the name of the -// instrumentation scope - (`InstrumentationScope.Name` in OTLP). -func OTelScopeName(val string) attribute.KeyValue { - return OTelScopeNameKey.String(val) -} - -// OTelScopeVersion returns an attribute KeyValue conforming to the -// "otel.scope.version" semantic conventions. 
It represents the version of the -// instrumentation scope - (`InstrumentationScope.Version` in OTLP). -func OTelScopeVersion(val string) attribute.KeyValue { - return OTelScopeVersionKey.String(val) -} - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry -// Scope's concepts. -const ( - // OTelLibraryNameKey is the attribute Key conforming to the - // "otel.library.name" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: 'io.opentelemetry.contrib.mongodb' - // Deprecated: use the `otel.scope.name` attribute. - OTelLibraryNameKey = attribute.Key("otel.library.name") - - // OTelLibraryVersionKey is the attribute Key conforming to the - // "otel.library.version" semantic conventions. - // - // Type: string - // RequirementLevel: Optional - // Stability: deprecated - // Examples: '1.0.0' - // Deprecated: use the `otel.scope.version` attribute. - OTelLibraryVersionKey = attribute.Key("otel.library.version") -) - -// OTelLibraryName returns an attribute KeyValue conforming to the -// "otel.library.name" semantic conventions. -// -// Deprecated: use the `otel.scope.name` attribute. -func OTelLibraryName(val string) attribute.KeyValue { - return OTelLibraryNameKey.String(val) -} - -// OTelLibraryVersion returns an attribute KeyValue conforming to the -// "otel.library.version" semantic conventions. -// -// Deprecated: use the `otel.scope.version` attribute. -func OTelLibraryVersion(val string) attribute.KeyValue { - return OTelLibraryVersionKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go deleted file mode 100644 index fe80b1731d02f..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/schema.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.24.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go deleted file mode 100644 index c1718234e5222..0000000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.24.0/trace.go +++ /dev/null @@ -1,1323 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.24.0" - -import "go.opentelemetry.io/otel/attribute" - -// Operations that access some remote service. -const ( - // PeerServiceKey is the attribute Key conforming to the "peer.service" - // semantic conventions. It represents the - // [`service.name`](/docs/resource/README.md#service) of the remote - // service. SHOULD be equal to the actual `service.name` resource attribute - // of the remote service if any. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// PeerService returns an attribute KeyValue conforming to the -// "peer.service" semantic conventions. 
It represents the -// [`service.name`](/docs/resource/README.md#service) of the remote service. -// SHOULD be equal to the actual `service.name` resource attribute of the -// remote service if any. -func PeerService(val string) attribute.KeyValue { - return PeerServiceKey.String(val) -} - -// These attributes may be used for any operation with an authenticated and/or -// authorized enduser. -const ( - // EnduserIDKey is the attribute Key conforming to the "enduser.id" - // semantic conventions. It represents the username or client_id extracted - // from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header - // in the inbound request from outside the system. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - - // EnduserRoleKey is the attribute Key conforming to the "enduser.role" - // semantic conventions. It represents the actual/assumed role the client - // is making the request under extracted from token or application security - // context. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - - // EnduserScopeKey is the attribute Key conforming to the "enduser.scope" - // semantic conventions. It represents the scopes or granted authorities - // the client currently possesses extracted from token or application - // security context. The value would come from the scope associated with an - // [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute - // value in a [SAML 2.0 - // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// EnduserID returns an attribute KeyValue conforming to the "enduser.id" -// semantic conventions. It represents the username or client_id extracted from -// the access token or -// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in -// the inbound request from outside the system. -func EnduserID(val string) attribute.KeyValue { - return EnduserIDKey.String(val) -} - -// EnduserRole returns an attribute KeyValue conforming to the -// "enduser.role" semantic conventions. It represents the actual/assumed role -// the client is making the request under extracted from token or application -// security context. -func EnduserRole(val string) attribute.KeyValue { - return EnduserRoleKey.String(val) -} - -// EnduserScope returns an attribute KeyValue conforming to the -// "enduser.scope" semantic conventions. It represents the scopes or granted -// authorities the client currently possesses extracted from token or -// application security context. The value would come from the scope associated -// with an [OAuth 2.0 Access -// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute -// value in a [SAML 2.0 -// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). -func EnduserScope(val string) attribute.KeyValue { - return EnduserScopeKey.String(val) -} - -// These attributes allow to report this unit of code and therefore to provide -// more context about the span. -const ( - // CodeColumnKey is the attribute Key conforming to the "code.column" - // semantic conventions. 
It represents the column number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 16 - CodeColumnKey = attribute.Key("code.column") - - // CodeFilepathKey is the attribute Key conforming to the "code.filepath" - // semantic conventions. It represents the source code file name that - // identifies the code unit as uniquely as possible (preferably an absolute - // file path). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - - // CodeFunctionKey is the attribute Key conforming to the "code.function" - // semantic conventions. It represents the method or function name, or - // equivalent (usually rightmost part of the code unit's name). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - - // CodeLineNumberKey is the attribute Key conforming to the "code.lineno" - // semantic conventions. It represents the line number in `code.filepath` - // best representing the operation. It SHOULD point within the code unit - // named in `code.function`. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") - - // CodeNamespaceKey is the attribute Key conforming to the "code.namespace" - // semantic conventions. It represents the "namespace" within which - // `code.function` is defined. Usually the qualified class or module name, - // such that `code.namespace` + some separator + `code.function` form a - // unique identifier for the code unit. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - - // CodeStacktraceKey is the attribute Key conforming to the - // "code.stacktrace" semantic conventions. It represents a stacktrace as a - // string in the natural representation for the language runtime. The - // representation is to be determined and documented by each language SIG. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'at - // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - CodeStacktraceKey = attribute.Key("code.stacktrace") -) - -// CodeColumn returns an attribute KeyValue conforming to the "code.column" -// semantic conventions. It represents the column number in `code.filepath` -// best representing the operation. It SHOULD point within the code unit named -// in `code.function`. -func CodeColumn(val int) attribute.KeyValue { - return CodeColumnKey.Int(val) -} - -// CodeFilepath returns an attribute KeyValue conforming to the -// "code.filepath" semantic conventions. It represents the source code file -// name that identifies the code unit as uniquely as possible (preferably an -// absolute file path). -func CodeFilepath(val string) attribute.KeyValue { - return CodeFilepathKey.String(val) -} - -// CodeFunction returns an attribute KeyValue conforming to the -// "code.function" semantic conventions. 
It represents the method or function -// name, or equivalent (usually rightmost part of the code unit's name). -func CodeFunction(val string) attribute.KeyValue { - return CodeFunctionKey.String(val) -} - -// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno" -// semantic conventions. It represents the line number in `code.filepath` best -// representing the operation. It SHOULD point within the code unit named in -// `code.function`. -func CodeLineNumber(val int) attribute.KeyValue { - return CodeLineNumberKey.Int(val) -} - -// CodeNamespace returns an attribute KeyValue conforming to the -// "code.namespace" semantic conventions. It represents the "namespace" within -// which `code.function` is defined. Usually the qualified class or module -// name, such that `code.namespace` + some separator + `code.function` form a -// unique identifier for the code unit. -func CodeNamespace(val string) attribute.KeyValue { - return CodeNamespaceKey.String(val) -} - -// CodeStacktrace returns an attribute KeyValue conforming to the -// "code.stacktrace" semantic conventions. It represents a stacktrace as a -// string in the natural representation for the language runtime. The -// representation is to be determined and documented by each language SIG. -func CodeStacktrace(val string) attribute.KeyValue { - return CodeStacktraceKey.String(val) -} - -// These attributes may be used for any operation to store information about a -// thread that started a span. -const ( - // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic - // conventions. It represents the current "managed" thread ID (as opposed - // to OS thread ID). - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - - // ThreadNameKey is the attribute Key conforming to the "thread.name" - // semantic conventions. It represents the current thread name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// ThreadID returns an attribute KeyValue conforming to the "thread.id" -// semantic conventions. It represents the current "managed" thread ID (as -// opposed to OS thread ID). -func ThreadID(val int) attribute.KeyValue { - return ThreadIDKey.Int(val) -} - -// ThreadName returns an attribute KeyValue conforming to the "thread.name" -// semantic conventions. It represents the current thread name. -func ThreadName(val string) attribute.KeyValue { - return ThreadNameKey.String(val) -} - -// Span attributes used by AWS Lambda (in addition to general `faas` -// attributes). -const ( - // AWSLambdaInvokedARNKey is the attribute Key conforming to the - // "aws.lambda.invoked_arn" semantic conventions. It represents the full - // invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the - // `/runtime/invocation/next` applicable). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `cloud.resource_id` if an alias is - // involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the -// "aws.lambda.invoked_arn" semantic conventions. 
It represents the full -// invoked ARN as provided on the `Context` passed to the function -// (`Lambda-Runtime-Invoked-Function-ARN` header on the -// `/runtime/invocation/next` applicable). -func AWSLambdaInvokedARN(val string) attribute.KeyValue { - return AWSLambdaInvokedARNKey.String(val) -} - -// Attributes for CloudEvents. CloudEvents is a specification on how to define -// event data in a standard way. These attributes can be attached to spans when -// performing operations with CloudEvents, regardless of the protocol being -// used. -const ( - // CloudeventsEventIDKey is the attribute Key conforming to the - // "cloudevents.event_id" semantic conventions. It represents the - // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) - // uniquely identifies the event. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - - // CloudeventsEventSourceKey is the attribute Key conforming to the - // "cloudevents.event_source" semantic conventions. It represents the - // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - // identifies the context in which an event happened. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'https://github.com/cloudevents', - // '/cloudevents/spec/pull/123', 'my-service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - - // CloudeventsEventSpecVersionKey is the attribute Key conforming to the - // "cloudevents.event_spec_version" semantic conventions. It represents the - // [version of the CloudEvents - // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) - // which the event uses. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - - // CloudeventsEventSubjectKey is the attribute Key conforming to the - // "cloudevents.event_subject" semantic conventions. It represents the - // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) - // of the event in the context of the event producer (identified by - // source). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") - - // CloudeventsEventTypeKey is the attribute Key conforming to the - // "cloudevents.event_type" semantic conventions. It represents the - // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - // contains a value describing the type of event related to the originating - // occurrence. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'com.github.pull_request.opened', - // 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") -) - -// CloudeventsEventID returns an attribute KeyValue conforming to the -// "cloudevents.event_id" semantic conventions. It represents the -// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id) -// uniquely identifies the event. 
-func CloudeventsEventID(val string) attribute.KeyValue { - return CloudeventsEventIDKey.String(val) -} - -// CloudeventsEventSource returns an attribute KeyValue conforming to the -// "cloudevents.event_source" semantic conventions. It represents the -// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) -// identifies the context in which an event happened. -func CloudeventsEventSource(val string) attribute.KeyValue { - return CloudeventsEventSourceKey.String(val) -} - -// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to -// the "cloudevents.event_spec_version" semantic conventions. It represents the -// [version of the CloudEvents -// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion) -// which the event uses. -func CloudeventsEventSpecVersion(val string) attribute.KeyValue { - return CloudeventsEventSpecVersionKey.String(val) -} - -// CloudeventsEventSubject returns an attribute KeyValue conforming to the -// "cloudevents.event_subject" semantic conventions. It represents the -// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject) -// of the event in the context of the event producer (identified by source). -func CloudeventsEventSubject(val string) attribute.KeyValue { - return CloudeventsEventSubjectKey.String(val) -} - -// CloudeventsEventType returns an attribute KeyValue conforming to the -// "cloudevents.event_type" semantic conventions. It represents the -// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) -// contains a value describing the type of event related to the originating -// occurrence. -func CloudeventsEventType(val string) attribute.KeyValue { - return CloudeventsEventTypeKey.String(val) -} - -// Semantic conventions for the OpenTracing Shim -const ( - // OpentracingRefTypeKey is the attribute Key conforming to the - // "opentracing.ref_type" semantic conventions. It represents the - // parent-child Reference type - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span doesn't depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's -// concepts. -const ( - // OTelStatusCodeKey is the attribute Key conforming to the - // "otel.status_code" semantic conventions. It represents the name of the - // code, either "OK" or "ERROR". MUST NOT be set if the status code is - // UNSET. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - OTelStatusCodeKey = attribute.Key("otel.status_code") - - // OTelStatusDescriptionKey is the attribute Key conforming to the - // "otel.status_description" semantic conventions. It represents the - // description of the Status if it has a value, otherwise not set. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'resource not found' - OTelStatusDescriptionKey = attribute.Key("otel.status_description") -) - -var ( - // The operation has been validated by an Application developer or Operator to have completed successfully - OTelStatusCodeOk = OTelStatusCodeKey.String("OK") - // The operation contains an error - OTelStatusCodeError = OTelStatusCodeKey.String("ERROR") -) - -// OTelStatusDescription returns an attribute KeyValue conforming to the -// "otel.status_description" semantic conventions. It represents the -// description of the Status if it has a value, otherwise not set. -func OTelStatusDescription(val string) attribute.KeyValue { - return OTelStatusDescriptionKey.String(val) -} - -// This semantic convention describes an instance of a function that runs -// without provisioning or managing of servers (also known as serverless -// functions or Function as a Service (FaaS)) with spans. -const ( - // FaaSInvocationIDKey is the attribute Key conforming to the - // "faas.invocation_id" semantic conventions. It represents the invocation - // ID of the current function invocation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSInvocationIDKey = attribute.Key("faas.invocation_id") -) - -// FaaSInvocationID returns an attribute KeyValue conforming to the -// "faas.invocation_id" semantic conventions. It represents the invocation ID -// of the current function invocation. -func FaaSInvocationID(val string) attribute.KeyValue { - return FaaSInvocationIDKey.String(val) -} - -// Semantic Convention for FaaS triggered as a response to some data source -// operation such as a database or filesystem read/write. -const ( - // FaaSDocumentCollectionKey is the attribute Key conforming to the - // "faas.document.collection" semantic conventions. It represents the name - // of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in - // Cosmos DB to the database name. - // - // Type: string - // RequirementLevel: Required - // Stability: experimental - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - - // FaaSDocumentNameKey is the attribute Key conforming to the - // "faas.document.name" semantic conventions. It represents the document - // name/table subjected to the operation. For example, in Cloud Storage or - // S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") - - // FaaSDocumentOperationKey is the attribute Key conforming to the - // "faas.document.operation" semantic conventions. It represents the - // describes the type of the operation that was performed on the data. - // - // Type: Enum - // RequirementLevel: Required - // Stability: experimental - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - - // FaaSDocumentTimeKey is the attribute Key conforming to the - // "faas.document.time" semantic conventions. It represents a string - // containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// FaaSDocumentCollection returns an attribute KeyValue conforming to the -// "faas.document.collection" semantic conventions. It represents the name of -// the source on which the triggering operation was performed. For example, in -// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the -// database name. -func FaaSDocumentCollection(val string) attribute.KeyValue { - return FaaSDocumentCollectionKey.String(val) -} - -// FaaSDocumentName returns an attribute KeyValue conforming to the -// "faas.document.name" semantic conventions. It represents the document -// name/table subjected to the operation. For example, in Cloud Storage or S3 -// is the name of the file, and in Cosmos DB the table name. -func FaaSDocumentName(val string) attribute.KeyValue { - return FaaSDocumentNameKey.String(val) -} - -// FaaSDocumentTime returns an attribute KeyValue conforming to the -// "faas.document.time" semantic conventions. It represents a string containing -// the time when the data was accessed in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). -func FaaSDocumentTime(val string) attribute.KeyValue { - return FaaSDocumentTimeKey.String(val) -} - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic - // conventions. It represents a string containing the schedule period as - // [Cron - // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") - - // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic - // conventions. It represents a string containing the function invocation - // time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format - // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") -) - -// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" -// semantic conventions. It represents a string containing the schedule period -// as [Cron -// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). -func FaaSCron(val string) attribute.KeyValue { - return FaaSCronKey.String(val) -} - -// FaaSTime returns an attribute KeyValue conforming to the "faas.time" -// semantic conventions. It represents a string containing the function -// invocation time in the [ISO -// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format -// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime). 
-func FaaSTime(val string) attribute.KeyValue { - return FaaSTimeKey.String(val) -} - -// Contains additional attributes for incoming FaaS spans. -const ( - // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart" - // semantic conventions. It represents a boolean that is true if the - // serverless function is executed for the first time (aka cold-start). - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// FaaSColdstart returns an attribute KeyValue conforming to the -// "faas.coldstart" semantic conventions. It represents a boolean that is true -// if the serverless function is executed for the first time (aka cold-start). -func FaaSColdstart(val bool) attribute.KeyValue { - return FaaSColdstartKey.Bool(val) -} - -// The `aws` conventions apply to operations using the AWS SDK. They map -// request or response parameters in AWS SDK API calls to attributes on a Span. -// The conventions have been collected over time based on feedback from AWS -// users of tracing and will continue to evolve as new interesting conventions -// are found. -// Some descriptions are also provided for populating general OpenTelemetry -// semantic conventions based on these APIs. -const ( - // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id" - // semantic conventions. It represents the AWS request ID as returned in - // the response headers `x-amz-request-id` or `x-amz-requestid`. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' - AWSRequestIDKey = attribute.Key("aws.request_id") -) - -// AWSRequestID returns an attribute KeyValue conforming to the -// "aws.request_id" semantic conventions. It represents the AWS request ID as -// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. -func AWSRequestID(val string) attribute.KeyValue { - return AWSRequestIDKey.String(val) -} - -// Attributes that exist for multiple DynamoDB request types. -const ( - // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the - // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the - // value of the `AttributesToGet` request parameter. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - - // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the - // "aws.dynamodb.consistent_read" semantic conventions. It represents the - // value of the `ConsistentRead` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - - // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the - // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the - // JSON-serialized value of each item in the `ConsumedCapacity` response - // field. 
- // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { - // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": - // { "CapacityUnits": number, "ReadCapacityUnits": number, - // "WriteCapacityUnits": number }, "TableName": "string", - // "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - - // AWSDynamoDBIndexNameKey is the attribute Key conforming to the - // "aws.dynamodb.index_name" semantic conventions. It represents the value - // of the `IndexName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - - // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to - // the "aws.dynamodb.item_collection_metrics" semantic conventions. It - // represents the JSON-serialized value of the `ItemCollectionMetrics` - // response field. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": - // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { - // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], - // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, - // "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - - // AWSDynamoDBLimitKey is the attribute Key conforming to the - // "aws.dynamodb.limit" semantic conventions. It represents the value of - // the `Limit` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - - // AWSDynamoDBProjectionKey is the attribute Key conforming to the - // "aws.dynamodb.projection" semantic conventions. It represents the value - // of the `ProjectionExpression` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, - // RelatedItems, ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - - // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to - // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It - // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` - // request parameter. - // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - - // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming - // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. - // It represents the value of the - // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
- // - // Type: double - // RequirementLevel: Optional - // Stability: experimental - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - - // AWSDynamoDBSelectKey is the attribute Key conforming to the - // "aws.dynamodb.select" semantic conventions. It represents the value of - // the `Select` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") - - // AWSDynamoDBTableNamesKey is the attribute Key conforming to the - // "aws.dynamodb.table_names" semantic conventions. It represents the keys - // in the `RequestItems` object field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") -) - -// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to -// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the -// value of the `AttributesToGet` request parameter. -func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributesToGetKey.StringSlice(val) -} - -// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the -// "aws.dynamodb.consistent_read" semantic conventions. It represents the value -// of the `ConsistentRead` request parameter. -func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { - return AWSDynamoDBConsistentReadKey.Bool(val) -} - -// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to -// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the -// JSON-serialized value of each item in the `ConsumedCapacity` response field. -func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { - return AWSDynamoDBConsumedCapacityKey.StringSlice(val) -} - -// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the -// "aws.dynamodb.index_name" semantic conventions. It represents the value of -// the `IndexName` request parameter. -func AWSDynamoDBIndexName(val string) attribute.KeyValue { - return AWSDynamoDBIndexNameKey.String(val) -} - -// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming -// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It -// represents the JSON-serialized value of the `ItemCollectionMetrics` response -// field. -func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { - return AWSDynamoDBItemCollectionMetricsKey.String(val) -} - -// AWSDynamoDBLimit returns an attribute KeyValue conforming to the -// "aws.dynamodb.limit" semantic conventions. It represents the value of the -// `Limit` request parameter. -func AWSDynamoDBLimit(val int) attribute.KeyValue { - return AWSDynamoDBLimitKey.Int(val) -} - -// AWSDynamoDBProjection returns an attribute KeyValue conforming to the -// "aws.dynamodb.projection" semantic conventions. It represents the value of -// the `ProjectionExpression` request parameter. -func AWSDynamoDBProjection(val string) attribute.KeyValue { - return AWSDynamoDBProjectionKey.String(val) -} - -// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.ReadCapacityUnits` request parameter. 
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) -} - -// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue -// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic -// conventions. It represents the value of the -// `ProvisionedThroughput.WriteCapacityUnits` request parameter. -func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { - return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) -} - -// AWSDynamoDBSelect returns an attribute KeyValue conforming to the -// "aws.dynamodb.select" semantic conventions. It represents the value of the -// `Select` request parameter. -func AWSDynamoDBSelect(val string) attribute.KeyValue { - return AWSDynamoDBSelectKey.String(val) -} - -// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_names" semantic conventions. It represents the keys in -// the `RequestItems` object field. -func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { - return AWSDynamoDBTableNamesKey.StringSlice(val) -} - -// DynamoDB.CreateTable -const ( - // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `GlobalSecondaryIndexes` request field - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": - // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ - // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - - // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to - // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It - // represents the JSON-serialized value of each item of the - // `LocalSecondaryIndexes` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "IndexARN": "string", "IndexName": "string", - // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_indexes" semantic -// conventions. It represents the JSON-serialized value of each item of the -// `GlobalSecondaryIndexes` request field -func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) -} - -// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming -// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It -// represents the JSON-serialized value of each item of the -// `LocalSecondaryIndexes` request field. 
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { - return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) -} - -// DynamoDB.ListTables -const ( - // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the - // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents - // the value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - - // AWSDynamoDBTableCountKey is the attribute Key conforming to the - // "aws.dynamodb.table_count" semantic conventions. It represents the the - // number of items in the `TableNames` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming -// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It -// represents the value of the `ExclusiveStartTableName` request parameter. -func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { - return AWSDynamoDBExclusiveStartTableKey.String(val) -} - -// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.table_count" semantic conventions. It represents the the -// number of items in the `TableNames` response parameter. -func AWSDynamoDBTableCount(val int) attribute.KeyValue { - return AWSDynamoDBTableCountKey.Int(val) -} - -// DynamoDB.Query -const ( - // AWSDynamoDBScanForwardKey is the attribute Key conforming to the - // "aws.dynamodb.scan_forward" semantic conventions. It represents the - // value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // RequirementLevel: Optional - // Stability: experimental - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the -// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of -// the `ScanIndexForward` request parameter. -func AWSDynamoDBScanForward(val bool) attribute.KeyValue { - return AWSDynamoDBScanForwardKey.Bool(val) -} - -// DynamoDB.Scan -const ( - // AWSDynamoDBCountKey is the attribute Key conforming to the - // "aws.dynamodb.count" semantic conventions. It represents the value of - // the `Count` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - - // AWSDynamoDBScannedCountKey is the attribute Key conforming to the - // "aws.dynamodb.scanned_count" semantic conventions. It represents the - // value of the `ScannedCount` response parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") - - // AWSDynamoDBSegmentKey is the attribute Key conforming to the - // "aws.dynamodb.segment" semantic conventions. It represents the value of - // the `Segment` request parameter. 
- // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - - // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the - // "aws.dynamodb.total_segments" semantic conventions. It represents the - // value of the `TotalSegments` request parameter. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") -) - -// AWSDynamoDBCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.count" semantic conventions. It represents the value of the -// `Count` response parameter. -func AWSDynamoDBCount(val int) attribute.KeyValue { - return AWSDynamoDBCountKey.Int(val) -} - -// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the -// "aws.dynamodb.scanned_count" semantic conventions. It represents the value -// of the `ScannedCount` response parameter. -func AWSDynamoDBScannedCount(val int) attribute.KeyValue { - return AWSDynamoDBScannedCountKey.Int(val) -} - -// AWSDynamoDBSegment returns an attribute KeyValue conforming to the -// "aws.dynamodb.segment" semantic conventions. It represents the value of the -// `Segment` request parameter. -func AWSDynamoDBSegment(val int) attribute.KeyValue { - return AWSDynamoDBSegmentKey.Int(val) -} - -// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the -// "aws.dynamodb.total_segments" semantic conventions. It represents the value -// of the `TotalSegments` request parameter. -func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { - return AWSDynamoDBTotalSegmentsKey.Int(val) -} - -// DynamoDB.UpdateTable -const ( - // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to - // the "aws.dynamodb.attribute_definitions" semantic conventions. It - // represents the JSON-serialized value of each item in the - // `AttributeDefinitions` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - - // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key - // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic - // conventions. It represents the JSON-serialized value of each item in the - // the `GlobalSecondaryIndexUpdates` request field. - // - // Type: string[] - // RequirementLevel: Optional - // Stability: experimental - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, - // "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming -// to the "aws.dynamodb.attribute_definitions" semantic conventions. It -// represents the JSON-serialized value of each item in the -// `AttributeDefinitions` request field. 
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { - return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) -} - -// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue -// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic -// conventions. It represents the JSON-serialized value of each item in the the -// `GlobalSecondaryIndexUpdates` request field. -func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { - return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) -} - -// Attributes that exist for S3 request types. -const ( - // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" - // semantic conventions. It represents the S3 bucket name the request - // refers to. Corresponds to the `--bucket` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'some-bucket-name' - // Note: The `bucket` attribute is applicable to all S3 operations that - // reference a bucket, i.e. that require the bucket name as a mandatory - // parameter. - // This applies to almost all S3 operations except `list-buckets`. - AWSS3BucketKey = attribute.Key("aws.s3.bucket") - - // AWSS3CopySourceKey is the attribute Key conforming to the - // "aws.s3.copy_source" semantic conventions. It represents the source - // object (in the form `bucket`/`key`) for the copy operation. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `copy_source` attribute applies to S3 copy operations and - // corresponds to the `--copy-source` parameter - // of the [copy-object operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") - - // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" - // semantic conventions. It represents the delete request container that - // specifies the objects to be deleted. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: - // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' - // Note: The `delete` attribute is only applicable to the - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // operation. - // The `delete` attribute corresponds to the `--delete` parameter of the - // [delete-objects operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). - AWSS3DeleteKey = attribute.Key("aws.s3.delete") - - // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic - // conventions. It represents the S3 object key the request refers to. - // Corresponds to the `--key` parameter of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // operations. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'someFile.yml' - // Note: The `key` attribute is applicable to all object-related S3 - // operations, i.e. that require the object key as a mandatory parameter. - // This applies in particular to the following operations: - // - // - - // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) - // - - // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) - // - - // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) - // - - // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) - // - - // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) - // - - // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) - // - - // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3KeyKey = attribute.Key("aws.s3.key") - - // AWSS3PartNumberKey is the attribute Key conforming to the - // "aws.s3.part_number" semantic conventions. It represents the part number - // of the part being uploaded in a multipart-upload operation. This is a - // positive integer between 1 and 10,000. - // - // Type: int - // RequirementLevel: Optional - // Stability: experimental - // Examples: 3456 - // Note: The `part_number` attribute is only applicable to the - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // and - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - // operations. - // The `part_number` attribute corresponds to the `--part-number` parameter - // of the - // [upload-part operation within the S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). - AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") - - // AWSS3UploadIDKey is the attribute Key conforming to the - // "aws.s3.upload_id" semantic conventions. It represents the upload ID - // that identifies the multipart upload. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' - // Note: The `upload_id` attribute applies to S3 multipart-upload - // operations and corresponds to the `--upload-id` parameter - // of the [S3 - // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) - // multipart operations. 
- // This applies in particular to the following operations: - // - // - - // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) - // - - // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) - // - - // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) - // - - // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) - // - - // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) - AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") -) - -// AWSS3Bucket returns an attribute KeyValue conforming to the -// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the -// request refers to. Corresponds to the `--bucket` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Bucket(val string) attribute.KeyValue { - return AWSS3BucketKey.String(val) -} - -// AWSS3CopySource returns an attribute KeyValue conforming to the -// "aws.s3.copy_source" semantic conventions. It represents the source object -// (in the form `bucket`/`key`) for the copy operation. -func AWSS3CopySource(val string) attribute.KeyValue { - return AWSS3CopySourceKey.String(val) -} - -// AWSS3Delete returns an attribute KeyValue conforming to the -// "aws.s3.delete" semantic conventions. It represents the delete request -// container that specifies the objects to be deleted. -func AWSS3Delete(val string) attribute.KeyValue { - return AWSS3DeleteKey.String(val) -} - -// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" -// semantic conventions. It represents the S3 object key the request refers to. -// Corresponds to the `--key` parameter of the [S3 -// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) -// operations. -func AWSS3Key(val string) attribute.KeyValue { - return AWSS3KeyKey.String(val) -} - -// AWSS3PartNumber returns an attribute KeyValue conforming to the -// "aws.s3.part_number" semantic conventions. It represents the part number of -// the part being uploaded in a multipart-upload operation. This is a positive -// integer between 1 and 10,000. -func AWSS3PartNumber(val int) attribute.KeyValue { - return AWSS3PartNumberKey.Int(val) -} - -// AWSS3UploadID returns an attribute KeyValue conforming to the -// "aws.s3.upload_id" semantic conventions. It represents the upload ID that -// identifies the multipart upload. -func AWSS3UploadID(val string) attribute.KeyValue { - return AWSS3UploadIDKey.String(val) -} - -// Semantic conventions to apply when instrumenting the GraphQL implementation. -// They map GraphQL operations to attributes on a Span. -const ( - // GraphqlDocumentKey is the attribute Key conforming to the - // "graphql.document" semantic conventions. It represents the GraphQL - // document being executed. - // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query findBookByID { bookByID(id: ?) { name } }' - // Note: The value may be sanitized to exclude sensitive information. - GraphqlDocumentKey = attribute.Key("graphql.document") - - // GraphqlOperationNameKey is the attribute Key conforming to the - // "graphql.operation.name" semantic conventions. It represents the name of - // the operation being executed. 
- // - // Type: string - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'findBookByID' - GraphqlOperationNameKey = attribute.Key("graphql.operation.name") - - // GraphqlOperationTypeKey is the attribute Key conforming to the - // "graphql.operation.type" semantic conventions. It represents the type of - // the operation being executed. - // - // Type: Enum - // RequirementLevel: Optional - // Stability: experimental - // Examples: 'query', 'mutation', 'subscription' - GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") -) - -var ( - // GraphQL query - GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") - // GraphQL mutation - GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") - // GraphQL subscription - GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") -) - -// GraphqlDocument returns an attribute KeyValue conforming to the -// "graphql.document" semantic conventions. It represents the GraphQL document -// being executed. -func GraphqlDocument(val string) attribute.KeyValue { - return GraphqlDocumentKey.String(val) -} - -// GraphqlOperationName returns an attribute KeyValue conforming to the -// "graphql.operation.name" semantic conventions. It represents the name of the -// operation being executed. -func GraphqlOperationName(val string) attribute.KeyValue { - return GraphqlOperationNameKey.String(val) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go new file mode 100644 index 0000000000000..ef85cb70c6d8b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/provider.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import "go.opentelemetry.io/otel/trace/embedded" + +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type TracerProvider interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.TracerProvider + + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. 
+ // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. + // + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer +} diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go new file mode 100644 index 0000000000000..d3aa476ee1254 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/span.go @@ -0,0 +1,177 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace/embedded" +) + +// Span is the individual component of a trace. It represents a single named +// and timed operation of a workflow that is traced. A Tracer is used to +// create a Span and it is then up to the operation the Span represents to +// properly end the Span when the operation itself ends. +// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Span interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Span + + // End completes the Span. The Span is considered complete and ready to be + // delivered through the rest of the telemetry pipeline after this method + // is called. Therefore, updates to the Span are not allowed after this + // method has been called. + End(options ...SpanEndOption) + + // AddEvent adds an event with the provided name and options. + AddEvent(name string, options ...EventOption) + + // AddLink adds a link. + // Adding links at span creation using WithLinks is preferred to calling AddLink + // later, for contexts that are available during span creation, because head + // sampling decisions can only consider information present during span creation. + AddLink(link Link) + + // IsRecording returns the recording state of the Span. It will return + // true if the Span is active and events can be recorded. + IsRecording() bool + + // RecordError will record err as an exception span event for this span. 
An + // additional call to SetStatus is required if the Status of the Span should + // be set to Error, as this method does not change the Span status. If this + // span is not being recorded or err is nil then this method does nothing. + RecordError(err error, options ...EventOption) + + // SpanContext returns the SpanContext of the Span. The returned SpanContext + // is usable even after the End method has been called for the Span. + SpanContext() SpanContext + + // SetStatus sets the status of the Span in the form of a code and a + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. + SetStatus(code codes.Code, description string) + + // SetName sets the Span name. + SetName(name string) + + // SetAttributes sets kv as attributes of the Span. If a key from kv + // already exists for an attribute of the Span it will be overwritten with + // the value contained in kv. + SetAttributes(kv ...attribute.KeyValue) + + // TracerProvider returns a TracerProvider that can be used to generate + // additional Spans on the same telemetry pipeline as the current Span. + TracerProvider() TracerProvider +} + +// Link is the relationship between two Spans. The relationship can be within +// the same Trace or across different Traces. +// +// For example, a Link is used in the following situations: +// +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. +type Link struct { + // SpanContext of the linked Span. + SpanContext SpanContext + + // Attributes describe the aspects of the link. + Attributes []attribute.KeyValue +} + +// LinkFromContext returns a link encapsulating the SpanContext in the provided +// ctx. +func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { + return Link{ + SpanContext: SpanContextFromContext(ctx), + Attributes: attrs, + } +} + +// SpanKind is the role a Span plays in a Trace. +type SpanKind int + +// As a convenience, these match the proto definition, see +// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 +// +// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` +// to coerce a span kind to a valid value. +const ( + // SpanKindUnspecified is an unspecified SpanKind and is not a valid + // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal + // if it is received. + SpanKindUnspecified SpanKind = 0 + // SpanKindInternal is a SpanKind for a Span that represents an internal + // operation within an application. + SpanKindInternal SpanKind = 1 + // SpanKindServer is a SpanKind for a Span that represents the operation + // of handling a request from a client. 
+ SpanKindServer SpanKind = 2 + // SpanKindClient is a SpanKind for a Span that represents the operation + // of client making a request to a server. + SpanKindClient SpanKind = 3 + // SpanKindProducer is a SpanKind for a Span that represents the operation + // of a producer sending a message to a message broker. Unlike + // SpanKindClient and SpanKindServer, there is often no direct + // relationship between this kind of Span and a SpanKindConsumer kind. A + // SpanKindProducer Span will end once the message is accepted by the + // message broker which might not overlap with the processing of that + // message. + SpanKindProducer SpanKind = 4 + // SpanKindConsumer is a SpanKind for a Span that represents the operation + // of a consumer receiving a message from a message broker. Like + // SpanKindProducer Spans, there is often no direct relationship between + // this Span and the Span that produced the message. + SpanKindConsumer SpanKind = 5 +) + +// ValidateSpanKind returns a valid span kind value. This will coerce +// invalid values into the default value, SpanKindInternal. +func ValidateSpanKind(spanKind SpanKind) SpanKind { + switch spanKind { + case SpanKindInternal, + SpanKindServer, + SpanKindClient, + SpanKindProducer, + SpanKindConsumer: + // valid + return spanKind + default: + return SpanKindInternal + } +} + +// String returns the specified name of the SpanKind in lower-case. +func (sk SpanKind) String() string { + switch sk { + case SpanKindInternal: + return "internal" + case SpanKindServer: + return "server" + case SpanKindClient: + return "client" + case SpanKindProducer: + return "producer" + case SpanKindConsumer: + return "consumer" + default: + return "unspecified" + } +} diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 28877d4ab4dff..d49adf671b954 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -5,13 +5,8 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "bytes" - "context" "encoding/hex" "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace/embedded" ) const ( @@ -326,247 +321,3 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) { Remote: sc.remote, }) } - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Span interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Span - - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // AddLink adds a link. 
- // Adding links at span creation using WithLinks is preferred to calling AddLink - // later, for contexts that are available during span creation, because head - // sampling decisions can only consider information present during span creation. - AddLink(link Link) - - // IsRecording returns the recording state of the Span. It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. 
SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. - SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type Tracer interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.Tracer - - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. 
- Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. -// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: Methods may be added to this interface in minor releases. See -// package documentation on API implementation for information on how to set -// default behavior for unimplemented methods. -type TracerProvider interface { - // Users of the interface can ignore this. This embedded type is only used - // by implementations of this interface. See the "API Implementations" - // section of the package documentation for more information. - embedded.TracerProvider - - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go new file mode 100644 index 0000000000000..77952d2a0b310 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/trace/embedded" +) + +// Tracer is the creator of Spans. 
+// +// Warning: Methods may be added to this interface in minor releases. See +// package documentation on API implementation for information on how to set +// default behavior for unimplemented methods. +type Tracer interface { + // Users of the interface can ignore this. This embedded type is only used + // by implementations of this interface. See the "API Implementations" + // section of the package documentation for more information. + embedded.Tracer + + // Start creates a span and a context.Context containing the newly-created span. + // + // If the context.Context provided in `ctx` contains a Span then the newly-created + // Span will be a child of that span, otherwise it will be a root span. This behavior + // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the + // newly-created Span to be a root span even if `ctx` contains a Span. + // + // When creating a Span it is recommended to provide all known span attributes using + // the `WithAttributes()` SpanOption as samplers will only have access to the + // attributes provided when a Span is created. + // + // Any Span that is created MUST also be ended. This is the responsibility of the user. + // Implementations of this API may leak memory or other resources if Spans are not ended. + Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 20b5cf24332da..dc5e34cad0dd4 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -260,6 +260,16 @@ func (ts TraceState) Get(key string) string { return "" } +// Walk walks all key value pairs in the TraceState by calling f +// Iteration stops if f returns false. +func (ts TraceState) Walk(f func(key, value string) bool) { + for _, m := range ts.list { + if !f(m.Key, m.Value) { + break + } + } +} + // Insert adds a new list-member defined by the key/value pair to the // TraceState. If a list-member already exists for the given key, that // list-member's value is updated. The new or updated list-member is always diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh new file mode 100644 index 0000000000000..c9b7cdbbfef7e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# SPDX-License-Identifier: Apache-2.0 + +set -euo pipefail + +TARGET="${1:?Must provide target ref}" + +FILE="CHANGELOG.md" +TEMP_DIR=$(mktemp -d) +echo "Temp folder: $TEMP_DIR" + +# Only the latest commit of the feature branch is available +# automatically. To diff with the base branch, we need to +# fetch that too (and we only need its latest commit). 
+git fetch origin "${TARGET}" --depth=1 + +# Checkout the previous version on the base branch of the changelog to tmpfolder +git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE + +PREVIOUS_FILE="$TEMP_DIR/$FILE" +CURRENT_FILE="$FILE" +PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md" +CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md" + +# Extract released sections from the previous version +awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE" + +# Extract released sections from the current version +awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE" + +# Compare the released sections +if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then + echo "Error: The released sections of the changelog file have been modified." + diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE" + rm -rf "$TEMP_DIR" + false +fi + +rm -rf "$TEMP_DIR" +echo "The released sections remain unchanged." diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index ab28960524b81..f67039ed1f90b 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.28.0" + return "1.29.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 241cfc82a8d0e..3ba611d71362c 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.28.0 + version: v1.29.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,15 +29,16 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.50.0 + version: v0.51.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.4.0 + version: v0.5.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log + - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: @@ -46,4 +47,3 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools - - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 6713accac09bf..c3895478ed0c5 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -1,243 +1,2791 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT.
//go:build amd64 && gc && !purego #include "textflag.h" -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 
t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - // func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), 
X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + MOVOU 352(AX), X6 + MOVOU 368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 
+ PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO 
X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + 
PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + 
PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + 
PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ 
X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 
288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + 
PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ 
X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + 
PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 
+ PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) + MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) + RET - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) - RET +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - 
MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 PXOR X1, X0 PXOR X2, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET -// func xorBlocksSSE2(out, a, b, c *block) -TEXT ·xorBlocksSSE2(SB), 4, $0-32 +// func xorBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - MOVOU 0(DX), X3 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + MOVOU (DX), X3 PXOR X1, X0 PXOR X2, X0 PXOR X3, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 9ae8206c20165..f75162e039c10 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -1,722 +1,4517 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT. //go:build amd64 && gc && !purego #include "textflag.h" -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 
BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - 
VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - 
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - // func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) + VMOVDQU ·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 JGE noinc INCQ R9 MOVQ R9, 8(DX) noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE 
$0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE 
$0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + 
VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE 
$0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + 
BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + 
BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE 
$0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + VPSHUFD $0x4e, 40(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 32(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 64(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE 
$0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 96(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 128(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPADDQ 160(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 192(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ 224(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ 256(DX), Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VMOVDQU Y8, (AX) + VMOVDQU Y9, 32(AX) VZEROUPPER - RET -#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA -#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB -#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF -#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD -#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE - -#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF -#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 -#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 -#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF -#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF - -#define SHUFFLE_AVX() \ - VMOVDQA X6, X13; \ - VMOVDQA X2, X14; \ - VMOVDQA X4, X6; \ - VPUNPCKLQDQ_X13_X13_X15; \ - VMOVDQA X5, X4; \ - VMOVDQA X6, X5; \ - VPUNPCKHQDQ_X15_X7_X6; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X13_X7; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VPUNPCKHQDQ_X15_X2_X2; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X3_X3; \ - -#define SHUFFLE_AVX_INV() \ - VMOVDQA X2, X13; \ - 
VMOVDQA X4, X14; \ - VPUNPCKLQDQ_X2_X2_X15; \ - VMOVDQA X5, X4; \ - VPUNPCKHQDQ_X15_X3_X2; \ - VMOVDQA X14, X5; \ - VPUNPCKLQDQ_X3_X3_X15; \ - VMOVDQA X6, X14; \ - VPUNPCKHQDQ_X15_X13_X3; \ - VPUNPCKLQDQ_X7_X7_X15; \ - VPUNPCKHQDQ_X15_X6_X6; \ - VPUNPCKLQDQ_X14_X14_X15; \ - VPUNPCKHQDQ_X15_X7_X7; \ - -#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - VPADDQ m0, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m1, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFD $-79, v6, v6; \ - VPSHUFD $-79, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPSHUFB c40, v2, v2; \ - VPSHUFB c40, v3, v3; \ - VPADDQ m2, v0, v0; \ - VPADDQ v2, v0, v0; \ - VPADDQ m3, v1, v1; \ - VPADDQ v3, v1, v1; \ - VPXOR v0, v6, v6; \ - VPXOR v1, v7, v7; \ - VPSHUFB c48, v6, v6; \ - VPSHUFB c48, v7, v7; \ - VPADDQ v6, v4, v4; \ - VPADDQ v7, v5, v5; \ - VPXOR v4, v2, v2; \ - VPXOR v5, v3, v3; \ - VPADDQ v2, v2, t0; \ - VPSRLQ $63, v2, v2; \ - VPXOR t0, v2, v2; \ - VPADDQ v3, v3, t0; \ - VPSRLQ $63, v3, v3; \ - VPXOR t0, v3, v3 - -// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) -// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 -#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X13(i2*8); \ - VMOVQ_SI_X14(i4*8); \ - VMOVQ_SI_X15(i6*8); \ - VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X13(i3*8); \ - VPINSRQ_1_SI_X14(i5*8); \ - VPINSRQ_1_SI_X15(i7*8) - -// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) -#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(1*8); \ - VMOVQ_SI_X15(5*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X13(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(7*8) - -// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) -#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ - VPSHUFD $0x4E, 0*8(SI), X12; \ - VMOVQ_SI_X13(11*8); \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(7*8); \ - VPINSRQ_1_SI_X13(5*8); \ - VPINSRQ_1_SI_X14(2*8); \ - VPINSRQ_1_SI_X15(3*8) - -// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) -#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ - VMOVDQU 11*8(SI), X12; \ - VMOVQ_SI_X13(5*8); \ - VMOVQ_SI_X14(8*8); \ - VMOVQ_SI_X15(2*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14_0; \ - VPINSRQ_1_SI_X15(13*8) - -// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) -#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13(4*8); \ - VMOVQ_SI_X14(6*8); \ - VMOVQ_SI_X15_0; \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(15*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(8*8) +DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) -#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ - VMOVQ_SI_X12(9*8); \ - VMOVQ_SI_X13(2*8); \ - VMOVQ_SI_X14_0; \ - VMOVQ_SI_X15(4*8); \ - VPINSRQ_1_SI_X12(5*8); \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VPINSRQ_1_SI_X15(15*8) +DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (2, 6), X13 = (0, 8), X14 = 
(12, 10), X15 = (11, 3) -#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X14(12*8); \ - VMOVQ_SI_X15(11*8); \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X13(8*8); \ - VPINSRQ_1_SI_X14(10*8); \ - VPINSRQ_1_SI_X15(3*8) +DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32 -// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) -#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ - MOVQ 0*8(SI), X12; \ - VPSHUFD $0x4E, 8*8(SI), X13; \ - MOVQ 7*8(SI), X14; \ - MOVQ 2*8(SI), X15; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X14(3*8); \ - VPINSRQ_1_SI_X15(11*8) - -// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) -#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ - MOVQ 6*8(SI), X12; \ - MOVQ 11*8(SI), X13; \ - MOVQ 15*8(SI), X14; \ - MOVQ 3*8(SI), X15; \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X14(9*8); \ - VPINSRQ_1_SI_X15(8*8) - -// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) -#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ - MOVQ 5*8(SI), X12; \ - MOVQ 8*8(SI), X13; \ - MOVQ 0*8(SI), X14; \ - MOVQ 6*8(SI), X15; \ - VPINSRQ_1_SI_X12(15*8); \ - VPINSRQ_1_SI_X13(2*8); \ - VPINSRQ_1_SI_X14(4*8); \ - VPINSRQ_1_SI_X15(10*8) - -// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) -#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ - VMOVDQU 12*8(SI), X12; \ - MOVQ 1*8(SI), X13; \ - MOVQ 2*8(SI), X14; \ - VPINSRQ_1_SI_X13(10*8); \ - VPINSRQ_1_SI_X14(7*8); \ - VMOVDQU 4*8(SI), X15 - -// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) -#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ - MOVQ 15*8(SI), X12; \ - MOVQ 3*8(SI), X13; \ - MOVQ 11*8(SI), X14; \ - MOVQ 12*8(SI), X15; \ - VPINSRQ_1_SI_X12(9*8); \ - VPINSRQ_1_SI_X13(13*8); \ - VPINSRQ_1_SI_X14(14*8); \ - VPINSRQ_1_SI_X15_0 +DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32 // func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - VMOVDQU ·AVX_c40<>(SB), X0 - VMOVDQU ·AVX_c48<>(SB), X1 +// Requires: AVX, SSE2 +TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + VMOVDQU ·AVX_c40<>+0(SB), X0 + VMOVDQU ·AVX_c48<>+0(SB), X1 VMOVDQA X0, X8 VMOVDQA X1, X9 - - VMOVDQU ·AVX_iv3<>(SB), X0 - VMOVDQA X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) - - VMOVDQU 0(AX), X10 + VMOVDQU ·AVX_iv3<>+0(SB), X0 + VMOVDQA X0, (R10) + XORQ CX, (R10) + VMOVDQU (AX), X10 VMOVDQU 16(AX), X11 VMOVDQU 32(AX), X2 VMOVDQU 48(AX), X3 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 + MOVQ (BX), R8 + MOVQ 8(BX), R9 loop: - ADDQ $128, R8 - CMPQ R8, $128 + ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - VMOVQ_R8_X15 - VPINSRQ_1_R9_X15 - + BYTE $0xc4 + BYTE $0x41 + BYTE $0xf9 + BYTE $0x6e + BYTE $0xf8 + BYTE $0xc4 + BYTE $0x43 + BYTE $0x81 + BYTE $0x22 + 
BYTE $0xf9 + BYTE $0x01 VMOVDQA X10, X0 VMOVDQA X11, X1 - VMOVDQU ·AVX_iv0<>(SB), X4 - VMOVDQU ·AVX_iv1<>(SB), X5 - VMOVDQU ·AVX_iv2<>(SB), X6 - + VMOVDQU ·AVX_iv0<>+0(SB), X4 + VMOVDQU ·AVX_iv1<>+0(SB), X5 + VMOVDQU ·AVX_iv2<>+0(SB), X6 VPXOR X15, X6, X6 - VMOVDQA 0(R10), X7 - - LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA (R10), X7 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 VMOVDQA X12, 16(R10) VMOVDQA X13, 32(R10) VMOVDQA X14, 48(R10) VMOVDQA X15, 64(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 VMOVDQA X12, 80(R10) VMOVDQA X13, 96(R10) VMOVDQA X14, 112(R10) VMOVDQA X15, 128(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + 
VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 VMOVDQA X12, 144(R10) VMOVDQA X13, 160(R10) VMOVDQA X14, 176(R10) VMOVDQA X15, 192(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPSHUFD $0x4e, (SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 VMOVDQA X12, 208(R10) VMOVDQA X13, 224(R10) 
VMOVDQA X14, 240(R10) VMOVDQA X15, 256(R10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_11_12_5_15_8_0_2_13() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_2_5_4_15_6_10_0_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_9_5_2_10_0_7_4_15() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_2_6_0_8_12_10_11_3() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_0_6_9_8_7_3_2_11() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_5_15_8_2_0_4_6_10() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX_6_14_11_0_15_9_3_8() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_12_13_1_10_2_7_4_5() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX() - LOAD_MSG_AVX_15_9_3_13_11_14_12_0() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) - SHUFFLE_AVX() - HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) - SHUFFLE_AVX_INV() - + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + 
BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VMOVDQU 88(SI), X12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x36 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, 
X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x70 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 
+ VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x78 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x68 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + 
VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + 
VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ (SI), X12 + VPSHUFD $0x4e, 64(SI), X13 + MOVQ 56(SI), X14 + MOVQ 16(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, 
X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x48 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 40(SI), X12 + MOVQ 64(SI), X13 + MOVQ (SI), X14 + MOVQ 48(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + 
VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + MOVQ 48(SI), X12 + MOVQ 88(SI), X13 + MOVQ 120(SI), X14 + MOVQ 24(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x40 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VMOVDQU 96(SI), X12 + MOVQ 8(SI), X13 + MOVQ 16(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + VMOVDQU 32(SI), X15 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + 
BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + MOVQ 120(SI), X12 + MOVQ 24(SI), X13 + MOVQ 88(SI), X14 + MOVQ 96(SI), X15 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x68 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x3e + BYTE $0x01 + VPADDQ X12, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X13, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ X14, X0, X0 + VPADDQ X2, X0, X0 + VPADDQ X15, X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + 
BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 16(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 32(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 48(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 64(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 80(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 96(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 112(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 128(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff + VPADDQ 144(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 160(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 176(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 192(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X6, X13 + 
VMOVDQA X2, X14 + VMOVDQA X4, X6 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x11 + BYTE $0x6c + BYTE $0xfd + VMOVDQA X5, X4 + VMOVDQA X6, X5 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xff + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x69 + BYTE $0x6d + BYTE $0xd7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xdf + VPADDQ 208(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 224(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFD $-79, X6, X6 + VPSHUFD $-79, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPSHUFB X8, X2, X2 + VPSHUFB X8, X3, X3 + VPADDQ 240(R10), X0, X0 + VPADDQ X2, X0, X0 + VPADDQ 256(R10), X1, X1 + VPADDQ X3, X1, X1 + VPXOR X0, X6, X6 + VPXOR X1, X7, X7 + VPSHUFB X9, X6, X6 + VPSHUFB X9, X7, X7 + VPADDQ X6, X4, X4 + VPADDQ X7, X5, X5 + VPXOR X4, X2, X2 + VPXOR X5, X3, X3 + VPADDQ X2, X2, X15 + VPSRLQ $0x3f, X2, X2 + VPXOR X15, X2, X2 + VPADDQ X3, X3, X15 + VPSRLQ $0x3f, X3, X3 + VPXOR X15, X3, X3 + VMOVDQA X2, X13 + VMOVDQA X4, X14 + BYTE $0xc5 + BYTE $0x69 + BYTE $0x6c + BYTE $0xfa + VMOVDQA X5, X4 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x61 + BYTE $0x6d + BYTE $0xd7 + VMOVDQA X14, X5 + BYTE $0xc5 + BYTE $0x61 + BYTE $0x6c + BYTE $0xfb + VMOVDQA X6, X14 + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x11 + BYTE $0x6d + BYTE $0xdf + BYTE $0xc5 + BYTE $0x41 + BYTE $0x6c + BYTE $0xff + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x49 + BYTE $0x6d + BYTE $0xf7 + BYTE $0xc4 + BYTE $0x41 + BYTE $0x09 + BYTE $0x6c + BYTE $0xfe + BYTE $0xc4 + BYTE $0xc1 + BYTE $0x41 + BYTE $0x6d + BYTE $0xff VMOVDQU 32(AX), X14 VMOVDQU 48(AX), X15 VPXOR X0, X10, X10 @@ -729,16 +4524,36 @@ noinc: VPXOR X7, X15, X3 VMOVDQU X2, 32(AX) VMOVDQU X3, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + VMOVDQU X10, (AX) + VMOVDQU X11, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + VZEROUPPER + RET - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop +DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16 - VMOVDQU X10, 0(AX) - VMOVDQU X11, 16(AX) +DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16 - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - VZEROUPPER +DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16 - RET +DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index adfac00c15c9c..9a0ce2124462f 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -1,278 +1,1441 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
+// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT. //go:build amd64 && gc && !purego #include "textflag.h" -DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ - PADDQ m0, v0; \ - PADDQ m1, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v6, v6; \ - PSHUFD $0xB1, v7, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - PSHUFB c40, v2; \ - PSHUFB c40, v3; \ - PADDQ m2, v0; \ - PADDQ m3, v1; \ - PADDQ v2, v0; \ - PADDQ v3, v1; \ - PXOR v0, v6; \ - PXOR v1, v7; \ - PSHUFB c48, v6; \ - PSHUFB c48, v7; \ - PADDQ v6, v4; \ - PADDQ v7, v5; \ - PXOR v4, v2; \ - PXOR v5, v3; \ - MOVOU v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVOU v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ - MOVQ i0*8(src), m0; \ - PINSRQ $1, i1*8(src), m0; \ - MOVQ i2*8(src), m1; \ - PINSRQ $1, i3*8(src), m1; \ - MOVQ i4*8(src), m2; \ - PINSRQ $1, i5*8(src), m2; \ - MOVQ i6*8(src), m3; \ - PINSRQ $1, i7*8(src), m3 - // func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, R10 - ADDQ $15, R10 - ANDQ $~15, R10 - - MOVOU ·iv3<>(SB), X0 - MOVO X0, 0(R10) - XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) - - MOVOU ·c40<>(SB), X13 - MOVOU ·c48<>(SB), X14 - - MOVOU 0(AX), X12 +// Requires: SSE2, SSE4.1, SSSE3 +TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, R10 + ADDQ $0x0f, R10 + ANDQ $-16, R10 + MOVOU ·iv3<>+0(SB), X0 + MOVO X0, (R10) + XORQ CX, (R10) + MOVOU ·c40<>+0(SB), X13 + MOVOU ·c48<>+0(SB), X14 + MOVOU (AX), X12 MOVOU 16(AX), 
X15 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 + MOVQ (BX), R8 + MOVQ 8(BX), R9 loop: - ADDQ $128, R8 - CMPQ R8, $128 + ADDQ $0x80, R8 + CMPQ R8, $0x80 JGE noinc INCQ R9 noinc: - MOVQ R8, X8 - PINSRQ $1, R9, X8 - - MOVO X12, X0 - MOVO X15, X1 - MOVOU 32(AX), X2 - MOVOU 48(AX), X3 - MOVOU ·iv0<>(SB), X4 - MOVOU ·iv1<>(SB), X5 - MOVOU ·iv2<>(SB), X6 - - PXOR X8, X6 - MOVO 0(R10), X7 - - LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) - MOVO X8, 16(R10) - MOVO X9, 32(R10) - MOVO X10, 48(R10) - MOVO X11, 64(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) - MOVO X8, 80(R10) - MOVO X9, 96(R10) - MOVO X10, 112(R10) - MOVO X11, 128(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) - MOVO X8, 144(R10) - MOVO X9, 160(R10) - MOVO X10, 176(R10) - MOVO X11, 192(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) - MOVO X8, 208(R10) - MOVO X9, 224(R10) - MOVO X10, 240(R10) - MOVO X11, 256(R10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 12, 1, 14, 4, 5, 15, 13, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, 
X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) - - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + MOVQ R8, X8 + PINSRQ $0x01, R9, X8 + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>+0(SB), X4 + MOVOU ·iv1<>+0(SB), X5 + MOVOU ·iv2<>+0(SB), X6 + PXOR X8, X6 + MOVO (R10), X7 + MOVQ (SI), X8 + PINSRQ $0x01, 16(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 48(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 40(SI), X11 + PINSRQ $0x01, 56(SI), X11 + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 64(SI), X8 + PINSRQ $0x01, 80(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 112(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 88(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 120(SI), X11 + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 112(SI), X8 + PINSRQ $0x01, 32(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 80(SI), X10 + PINSRQ 
$0x01, 64(SI), X10 + MOVQ 120(SI), X11 + PINSRQ $0x01, 48(SI), X11 + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 8(SI), X8 + PINSRQ $0x01, (SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, 40(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 16(SI), X10 + MOVQ 56(SI), X11 + PINSRQ $0x01, 24(SI), X11 + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 88(SI), X8 + PINSRQ $0x01, 96(SI), X8 + MOVQ 40(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 64(SI), X10 + PINSRQ $0x01, (SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + 
PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + 
PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + 
MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + 
PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ 
X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + 
JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) + RET - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) - SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) +DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), RODATA|NOPTR, $16 - MOVOU 32(AX), X10 - MOVOU 48(AX), X11 - PXOR X0, X12 - PXOR X1, X15 - PXOR X2, X10 - PXOR X3, X11 - PXOR X4, X12 - PXOR X5, X15 - PXOR X6, X10 - PXOR X7, X11 - MOVOU X10, 32(AX) - MOVOU X11, 48(AX) +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 - MOVOU X12, 0(AX) - MOVOU X15, 16(AX) +DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), RODATA|NOPTR, $16 - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) +DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), RODATA|NOPTR, $16 - RET +DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 731d2ac6dbc11..fd5ee845f9faa 100644 --- a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -1,2715 +1,9762 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. +// Code generated by command: go run chacha20poly1305_amd64_asm.go -out ../chacha20poly1305_amd64.s -pkg chacha20poly1305. DO NOT EDIT. 
//go:build gc && !purego #include "textflag.h" -// General register allocation -#define oup DI -#define inp SI -#define inl BX -#define adp CX // free to reuse, after we hash the additional data -#define keyp R8 // free to reuse, when we copy the key to stack -#define itr2 R9 // general iterator -#define itr1 CX // general iterator -#define acc0 R10 -#define acc1 R11 -#define acc2 R12 -#define t0 R13 -#define t1 R14 -#define t2 R15 -#define t3 R8 -// Register and stack allocation for the SSE code -#define rStore (0*16)(BP) -#define sStore (1*16)(BP) -#define state1Store (2*16)(BP) -#define state2Store (3*16)(BP) -#define tmpStore (4*16)(BP) -#define ctr0Store (5*16)(BP) -#define ctr1Store (6*16)(BP) -#define ctr2Store (7*16)(BP) -#define ctr3Store (8*16)(BP) -#define A0 X0 -#define A1 X1 -#define A2 X2 -#define B0 X3 -#define B1 X4 -#define B2 X5 -#define C0 X6 -#define C1 X7 -#define C2 X8 -#define D0 X9 -#define D1 X10 -#define D2 X11 -#define T0 X12 -#define T1 X13 -#define T2 X14 -#define T3 X15 -#define A3 T0 -#define B3 T1 -#define C3 T2 -#define D3 T3 -// Register and stack allocation for the AVX2 code -#define rsStoreAVX2 (0*32)(BP) -#define state1StoreAVX2 (1*32)(BP) -#define state2StoreAVX2 (2*32)(BP) -#define ctr0StoreAVX2 (3*32)(BP) -#define ctr1StoreAVX2 (4*32)(BP) -#define ctr2StoreAVX2 (5*32)(BP) -#define ctr3StoreAVX2 (6*32)(BP) -#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack -#define AA0 Y0 -#define AA1 Y5 -#define AA2 Y6 -#define AA3 Y7 -#define BB0 Y14 -#define BB1 Y9 -#define BB2 Y10 -#define BB3 Y11 -#define CC0 Y12 -#define CC1 Y13 -#define CC2 Y8 -#define CC3 Y15 -#define DD0 Y4 -#define DD1 Y1 -#define DD2 Y2 -#define DD3 Y3 -#define TT0 DD3 -#define TT1 AA3 -#define TT2 BB3 -#define TT3 CC3 -// ChaCha20 constants -DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 -DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 -DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e -DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 -DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 -// <<< 16 with PSHUFB -DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A -DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 -DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A -// <<< 8 with PSHUFB -DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B -DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 -DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B - -DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 -DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 -DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 - -DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 -DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 -DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 -// Poly1305 key clamp -DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF -DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF - -DATA ·sseIncMask<>+0x00(SB)/8, $0x1 -DATA ·sseIncMask<>+0x08(SB)/8, $0x0 -// To load/store the last < 16 bytes in a buffer -DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0x28(SB)/8, 
$0x0000000000000000 -DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff -DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 -DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff -DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff -DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff -DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff -DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff -DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff -DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff -DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff - -GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 -GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 -GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 -GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 -GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 -// No PALIGNR in Go ASM yet (but VPALIGNR is present). -#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 -#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 -#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 -#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 -#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 -#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 -#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 -#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 -#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 -#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 -#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 -#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 -#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 -#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 -#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 -#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR 
$12, X13, X13 -#define shiftC0Right shiftC0Left -#define shiftC1Right shiftC1Left -#define shiftC2Right shiftC2Left -#define shiftC3Right shiftC3Left -#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 -#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 -#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 -#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 - -// Some macros - -// ROL rotates the uint32s in register R left by N bits, using temporary T. -#define ROL(N, R, T) \ - MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R - -// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL16(R, T) PSHUFB ·rol16<>(SB), R -#else -#define ROL16(R, T) ROL(16, R, T) -#endif - -// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. -#ifdef GOAMD64_v2 -#define ROL8(R, T) PSHUFB ·rol8<>(SB), R -#else -#define ROL8(R, T) ROL(8, R, T) -#endif - -#define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; ROL16(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; ROL8(D, T) \ - PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B - -#define chachaQR_AVX2(A, B, C, D, T) \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ - VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ - VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B - -#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 -#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX -#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 -#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 - -#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 -#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 -#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 - -#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage -#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage -// ---------------------------------------------------------------------------- + +// func polyHashADInternal<>() TEXT polyHashADInternal<>(SB), NOSPLIT, $0 - // adp points to beginning of additional data - // itr2 holds ad length - XORQ acc0, acc0 - XORQ acc1, acc1 - XORQ acc2, acc2 - CMPQ itr2, $13 - JNE hashADLoop - -openFastTLSAD: - // Special treatment for the TLS case of 13 bytes - MOVQ (adp), acc0 - MOVQ 5(adp), acc1 - SHRQ $24, acc1 - MOVQ $1, acc2 - polyMul + // Hack: 
Must declare #define macros inside of a function due to Avo constraints + // ROL rotates the uint32s in register R left by N bits, using temporary T. + #define ROL(N, R, T) \ + MOVO R, T; \ + PSLLL $(N), T; \ + PSRLL $(32-(N)), R; \ + PXOR T, R + + // ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL8(R, T) PSHUFB ·rol8<>(SB), R + #else + #define ROL8(R, T) ROL(8, R, T) + #endif + + // ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. + #ifdef GOAMD64_v2 + #define ROL16(R, T) PSHUFB ·rol16<>(SB), R + #else + #define ROL16(R, T) ROL(16, R, T) + #endif + XORQ R10, R10 + XORQ R11, R11 + XORQ R12, R12 + CMPQ R9, $0x0d + JNE hashADLoop + MOVQ (CX), R10 + MOVQ 5(CX), R11 + SHRQ $0x18, R11 + MOVQ $0x00000001, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 RET hashADLoop: // Hash in 16 byte chunks - CMPQ itr2, $16 - JB hashADTail - polyAdd(0(adp)) - LEAQ (1*16)(adp), adp - SUBQ $16, itr2 - polyMul - JMP hashADLoop + CMPQ R9, $0x10 + JB hashADTail + ADDQ (CX), R10 + ADCQ 8(CX), R11 + ADCQ $0x01, R12 + LEAQ 16(CX), CX + SUBQ $0x10, R9 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + JMP hashADLoop hashADTail: - CMPQ itr2, $0 + CMPQ R9, $0x00 JE hashADDone // Hash last < 16 byte tail - XORQ t0, t0 - XORQ t1, t1 - XORQ t2, t2 - ADDQ itr2, adp + XORQ R13, R13 + XORQ R14, R14 + XORQ R15, R15 + ADDQ R9, CX hashADTailLoop: - SHLQ $8, t0, t1 - SHLQ $8, t0 - MOVB -1(adp), t2 - XORQ t2, t0 - DECQ adp - DECQ itr2 - JNE hashADTailLoop - -hashADTailFinish: - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - // Finished AD + SHLQ $0x08, R13, R14 + SHLQ $0x08, R13 + MOVB -1(CX), R15 + XORQ R15, R13 + DECQ CX + DECQ R9 + JNE hashADTailLoop + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + hashADDone: RET -// 
---------------------------------------------------------------------------- -// func chacha20Poly1305Open(dst, key, src, ad []byte) bool -TEXT ·chacha20Poly1305Open(SB), 0, $288-97 +// func chacha20Poly1305Open(dst []byte, key []uint32, src []byte, ad []byte) bool +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Open(SB), $288-97 // For aligned stack access MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX // Check for AVX2 support - CMPB ·useAVX2(SB), $1 + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Open_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE openSSE128 // About 16% faster + CMPQ BX, $0x80 + JBE openSSE128 // For long buffers, prepare the poly key first - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 - MOVO D0, T1 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X9, X13 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store - MOVO D0, ctr3Store - MOVQ $10, itr2 + MOVO X3, 32(BP) + MOVO X6, 48(BP) + MOVO X9, 128(BP) + MOVQ $0x0000000a, R9 openSSEPreparePolyKey: - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - DECQ itr2 - JNE openSSEPreparePolyKey + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + DECQ R9 + JNE openSSEPreparePolyKey // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore; MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSEMainLoop: - CMPQ inl, $256 + CMPQ BX, $0x00000100 JB openSSEMainLoopDone // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL 
·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) - // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 - MOVQ $4, itr1 - MOVQ inp, itr2 + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash + // 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $0x00000004, CX + MOVQ SI, R9 openSSEInternalLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(itr2)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(itr2), itr2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr1 - JGE openSSEInternalLoop - - polyAdd(0(itr2)) - polyMul - LEAQ (2*8)(itr2), itr2 - - CMPQ itr1, $-6 - JG openSSEInternalLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE 
$0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(R9), R9 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + 
BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ CX + JGE openSSEInternalLoop + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + CMPQ CX, $-6 + JG openSSEInternalLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Load - xor - store - MOVO D3, tmpStore - MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) - MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) - MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) - MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) - MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) - MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) - MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) - MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) - MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) - MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) - MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) - MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) - MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) - MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) - LEAQ 256(inp), inp - LEAQ 256(oup), oup - SUBQ $256, inl + MOVO X15, 64(BP) + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU X0, (DI) + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU X3, 16(DI) + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU X6, 32(DI) + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X9, 48(DI) + MOVOU 64(SI), X9 + PXOR X9, X1 + MOVOU X1, 64(DI) + MOVOU 80(SI), X9 + PXOR X9, X4 + MOVOU X4, 80(DI) + MOVOU 96(SI), X9 + PXOR X9, X7 + MOVOU X7, 96(DI) + MOVOU 112(SI), X9 + PXOR X9, X10 + MOVOU X10, 112(DI) + MOVOU 128(SI), X9 + PXOR X9, X2 + MOVOU X2, 128(DI) + MOVOU 144(SI), X9 + PXOR X9, X5 + MOVOU X5, 144(DI) + MOVOU 160(SI), X9 + PXOR X9, X8 + MOVOU X8, 160(DI) + MOVOU 176(SI), X9 + PXOR X9, X11 + MOVOU X11, 176(DI) + MOVOU 192(SI), X9 + PXOR X9, X12 + MOVOU X12, 192(DI) + MOVOU 208(SI), X9 + PXOR X9, X13 + MOVOU X13, 208(DI) + MOVOU 
224(SI), X9 + PXOR X9, X14 + MOVOU X14, 224(DI) + MOVOU 240(SI), X9 + PXOR 64(BP), X9 + MOVOU X9, 240(DI) + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openSSEMainLoop openSSEMainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $64 + CMPQ BX, $0x40 JBE openSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openSSETail128 - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openSSETail192 JMP openSSETail256 openSSEFinalize: // Hash in the PT, AAD lengths - ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally, constant time compare to the tag at the end of the message XORQ AX, AX - MOVQ $1, DX - XORQ (0*8)(inp), acc0 - XORQ (1*8)(inp), acc1 - ORQ acc1, acc0 + MOVQ $0x00000001, DX + XORQ (SI), R10 + XORQ 8(SI), R11 + ORQ R11, R10 CMOVQEQ DX, AX // Return true iff tags are equal MOVB AX, ret+96(FP) RET -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 129 bytes openSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 openSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE openSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE openSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 
+ PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore; MOVOU B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openSSE128Open: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail16 - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0(inp)) + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 // Load for decryption - MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - polyMul + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP openSSE128Open openSSETail16: - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize // We can safely load the CT from the end, because it is padded with the MAC - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVOU (inp), T0 - ADDQ inl, inp - PAND -16(t0)(itr2*1), T0 - MOVO T0, 0+tmpStore - MOVQ T0, t0 - MOVQ 8+tmpStore, t1 - PXOR A1, T0 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVOU (SI), X12 + ADDQ BX, SI + PAND -16(R13)(R9*1), X12 + MOVO X12, 64(BP) + MOVQ X12, R13 + MOVQ 72(BP), R14 + PXOR X1, X12 // We can only store one byte at a time, since plaintext can be shorter than 16 bytes openSSETail16Store: - MOVQ T0, t3 - MOVB t3, (oup) - PSRLDQ $1, T0 - INCQ oup - DECQ inl + MOVQ X12, R8 + MOVB R8, (DI) + PSRLDQ $0x01, X12 + INCQ DI + DECQ BX JNE openSSETail16Store - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 JMP openSSEFinalize -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of ciphertext openSSETail64: - // Need to decrypt up to 64 bytes - prepare single block - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, 
ctr0Store - XORQ itr2, itr2 - MOVQ inl, itr1 - CMPQ itr1, $16 - JB openSSETail64LoopB + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + XORQ R9, R9 + MOVQ BX, CX + CMPQ CX, $0x10 + JB openSSETail64LoopB openSSETail64LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul - SUBQ $16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX openSSETail64LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0) - shiftB0Left; shiftC0Left; shiftD0Left - chachaQR(A0, B0, C0, D0, T0) - shiftB0Right; shiftC0Right; shiftD0Right - - CMPQ itr1, $16 - JAE openSSETail64LoopA - - CMPQ itr2, $160 - JNE openSSETail64LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + CMPQ CX, $0x10 + JAE openSSETail64LoopA + CMPQ R9, $0xa0 + JNE openSSETail64LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL 32(BP), X3 + PADDL 48(BP), X6 + PADDL 80(BP), X9 openSSETail64DecLoop: - CMPQ inl, $16 + CMPQ BX, $0x10 JB openSSETail64DecLoopDone - SUBQ $16, inl - MOVOU (inp), T0 - PXOR T0, A0 - MOVOU A0, (oup) - LEAQ 16(inp), inp - LEAQ 16(oup), oup - MOVO B0, A0 - MOVO C0, B0 - MOVO D0, C0 + SUBQ $0x10, BX + MOVOU (SI), X12 + PXOR X12, X0 + MOVOU X0, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + MOVO X3, X0 + MOVO X6, X3 + MOVO X9, X6 JMP openSSETail64DecLoop openSSETail64DecLoopDone: - MOVO A0, A1 + MOVO X0, X1 JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openSSETail128: - // Need to decrypt up to 128 bytes - prepare two blocks - MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL 
·sseIncMask<>(SB), D1; MOVO D1, ctr0Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 96(BP) + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX openSSETail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSETail128LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - CMPQ itr2, itr1 - JB openSSETail128LoopA - - CMPQ itr2, $160 - JNE openSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr1Store, D0; PADDL ctr0Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - - SUBQ $64, inl - LEAQ 64(inp), inp - LEAQ 64(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of ciphertext + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL 
$0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + CMPQ R9, CX + JB openSSETail128LoopA + CMPQ R9, $0xa0 + JNE openSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 96(BP), X9 + PADDL 80(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + SUBQ $0x40, BX + LEAQ 64(SI), SI + LEAQ 64(DI), DI + JMP openSSETail64DecLoop + openSSETail192: - // Need to decrypt up to 192 bytes - prepare three blocks - MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store - MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store - - MOVQ inl, itr1 - MOVQ $160, itr2 - CMPQ itr1, $160 - CMOVQGT itr2, itr1 - ANDQ $-16, itr1 - XORQ itr2, itr2 + MOVO ·chacha20Constants<>+0(SB), X2 + MOVO 32(BP), X5 + MOVO 48(BP), X8 + MOVO 128(BP), X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 80(BP) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X0 + MOVO X4, X3 + MOVO X7, X6 + MOVO X10, X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 112(BP) + MOVQ BX, CX + MOVQ $0x000000a0, R9 + CMPQ CX, $0xa0 + CMOVQGT R9, CX + ANDQ $-16, CX + XORQ R9, R9 openSSLTail192LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMul + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192LoopB: - ADDQ $16, itr2 - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; 
shiftC2Left; shiftD2Left - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - CMPQ itr2, itr1 - JB openSSLTail192LoopA - - CMPQ itr2, $160 - JNE openSSLTail192LoopB - - CMPQ inl, $176 - JB openSSLTail192Store - - polyAdd(160(inp)) - polyMul - - CMPQ inl, $192 - JB openSSLTail192Store - - polyAdd(176(inp)) - polyMul + ADDQ $0x10, R9 + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c 
+ BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + CMPQ R9, CX + JB openSSLTail192LoopA + CMPQ R9, $0xa0 + JNE openSSLTail192LoopB + CMPQ BX, $0xb0 + JB openSSLTail192Store + ADDQ 160(SI), R10 + ADCQ 168(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + CMPQ BX, $0xc0 + JB openSSLTail192Store + ADDQ 176(SI), R10 + ADCQ 184(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openSSLTail192Store: - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 - MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) - - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - SUBQ $128, inl - LEAQ 128(inp), inp - LEAQ 128(oup), oup - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 112(BP), X9 + PADDL 96(BP), X10 + PADDL 80(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X2 + PXOR X13, X5 + PXOR X14, X8 + PXOR X15, X11 + MOVOU X2, (DI) + MOVOU X5, 16(DI) + MOVOU X8, 32(DI) + MOVOU X11, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + LEAQ 128(DI), DI + JMP openSSETail64DecLoop + openSSETail256: - // Need to decrypt up to 256 
bytes - prepare four blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - XORQ itr2, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + XORQ R9, R9 openSSETail256Loop: - // This loop inteleaves 8 ChaCha quarter rounds with 1 poly multiplication - polyAdd(0(inp)(itr2*1)) - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulStage3 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - ADDQ $2*8, itr2 - CMPQ itr2, $160 - JB openSSETail256Loop - MOVQ inl, itr1 - ANDQ $-16, itr1 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + 
BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + ADDQ $0x10, R9 + CMPQ R9, $0xa0 + JB openSSETail256Loop + MOVQ BX, CX + ANDQ $-16, CX openSSETail256HashLoop: - polyAdd(0(inp)(itr2*1)) - polyMul - ADDQ $2*8, itr2 - CMPQ itr2, itr1 - JB openSSETail256HashLoop + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, R9 + CMPQ R9, CX + JB openSSETail256HashLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - LEAQ 192(inp), inp - LEAQ 192(oup), oup - SUBQ $192, inl - MOVO A3, A0 - MOVO B3, B0 - MOVO C3, C0 - MOVO tmpStore, D0 - - JMP openSSETail64DecLoop - -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 
144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + LEAQ 192(SI), SI + LEAQ 192(DI), DI + SUBQ $0xc0, BX + MOVO X12, X0 + MOVO X13, X3 + MOVO X14, X6 + MOVO 64(BP), X9 + JMP openSSETail64DecLoop + chacha20Poly1305Open_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimization, for very short buffers - CMPQ inl, $192 + CMPQ BX, $0xc0 JBE openAVX2192 - CMPQ inl, $320 + CMPQ BX, $0x00000140 JBE openAVX2320 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, state2StoreAVX2 - VMOVDQA DD0, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, 64(BP) + VMOVDQA Y4, 192(BP) + MOVQ $0x0000000a, R9 openAVX2PreparePolyKey: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - DECQ itr2 - JNE openAVX2PreparePolyKey - - VPADDD ·chacha20Constants<>(SB), AA0, AA0 - VPADDD state1StoreAVX2, BB0, BB0 - VPADDD state2StoreAVX2, CC0, CC0 - VPADDD ctr3StoreAVX2, DD0, DD0 - - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ R9 + JNE openAVX2PreparePolyKey + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD 32(BP), Y14, Y14 + VPADDD 64(BP), Y12, Y12 + VPADDD 192(BP), Y4, Y4 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for the first 64 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 // Hash AD + first 64 bytes - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX openAVX2InitialHash64: - polyAdd(0(inp)(itr1*1)) - polyMulAVX2 - ADDQ $16, itr1 - 
CMPQ itr1, $64 - JNE openAVX2InitialHash64 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ $0x10, CX + CMPQ CX, $0x40 + JNE openAVX2InitialHash64 // Decrypt the first 64 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR (1*32)(inp), BB0, BB0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU BB0, (1*32)(oup) - LEAQ (2*32)(inp), inp - LEAQ (2*32)(oup), oup - SUBQ $64, inl + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VMOVDQU Y0, (DI) + VMOVDQU Y14, 32(DI) + LEAQ 64(SI), SI + LEAQ 64(DI), DI + SUBQ $0x40, BX openAVX2MainLoop: - CMPQ inl, $512 + CMPQ BX, $0x00000200 JB openAVX2MainLoopDone // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX openAVX2InternalLoop: - // Lets just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications - // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext - polyAdd(0*8(inp)(itr1*1)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR 
AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(inp)(itr1*1)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(inp)(itr1*1)) - LEAQ (6*8)(itr1), itr1 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - CMPQ itr1, $480 + ADDQ (SI)(CX*1), R10 + ADCQ 8(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB 
·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(SI)(CX*1), R10 + ADCQ 24(SI)(CX*1), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(SI)(CX*1), R10 + ADCQ 40(SI)(CX*1), R11 + ADCQ $0x01, R12 + LEAQ 48(CX), CX + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD 
$0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + CMPQ CX, $0x000001e0 JNE openAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(480(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; 
VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ 480(SI), R10 + ADCQ 488(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(496(inp)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - LEAQ (32*16)(oup), oup - SUBQ $(32*16), inl + ADDQ 496(SI), R10 + ADCQ 504(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + LEAQ 512(DI), DI + SUBQ $0x00000200, BX JMP openAVX2MainLoop 
openAVX2MainLoopDone: // Handle the various tail sizes efficiently - TESTQ inl, inl + TESTQ BX, BX JE openSSEFinalize - CMPQ inl, $128 + CMPQ BX, $0x80 JBE openAVX2Tail128 - CMPQ inl, $256 + CMPQ BX, $0x00000100 JBE openAVX2Tail256 - CMPQ inl, $384 + CMPQ BX, $0x00000180 JBE openAVX2Tail384 JMP openAVX2Tail512 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes openAVX2192: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 openAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE openAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD 
TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 openAVX2ShortOpen: // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) openAVX2ShortOpenLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 - polyAdd(2*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(SI), R10 + ADCQ 24(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP openAVX2ShortOpenLoop openAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for hashing - polyAdd(0*8(inp)) - polyMulAVX2 + ADDQ (SI), R10 + ADCQ 8(SI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 
+ ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 openAVX2ShortDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes openAVX2320: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 openAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, 
Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE openAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP openAVX2ShortOpen -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext openAVX2Tail128: // Need to decrypt up to 128 bytes - prepare two blocks - VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD1 - VMOVDQA DD1, DD0 - - XORQ itr2, itr2 - MOVQ inl, itr1 - ANDQ $-16, itr1 - TESTQ itr1, itr1 - JE openAVX2Tail128LoopB + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y1 + 
VMOVDQA Y1, Y4 + XORQ R9, R9 + MOVQ BX, CX + ANDQ $-16, CX + TESTQ CX, CX + JE openAVX2Tail128LoopB openAVX2Tail128LoopA: - // Perform ChaCha rounds, while hashing the remaining input - polyAdd(0(inp)(itr2*1)) - polyMulAVX2 + ADDQ (SI)(R9*1), R10 + ADCQ 8(SI)(R9*1), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 openAVX2Tail128LoopB: - ADDQ $16, itr2 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 - JB openAVX2Tail128LoopA - CMPQ itr2, $160 - JNE openAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC1, CC1 - VPADDD DD0, DD1, DD1 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + ADDQ $0x10, R9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX + JB openAVX2Tail128LoopA + CMPQ R9, $0xa0 + JNE openAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y13, Y13 + VPADDD Y4, Y1, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 openAVX2TailLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB openAVX2Tail - SUBQ $32, inl + SUBQ $0x20, BX // Load for decryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp - LEAQ (1*32)(oup), oup - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI + LEAQ 32(DI), DI + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 JMP openAVX2TailLoop openAVX2Tail: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB openAVX2TailDone - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, 
X1 openAVX2TailDone: VZEROUPPER JMP openSSETail16 -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext openAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare four blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 // Compute the number of iterations that will hash data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $128, itr1 - SHRQ $4, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x80, CX + SHRQ $0x04, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 openAVX2Tail256LoopA: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX - // Perform ChaCha rounds, while hashing the remaining input openAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - CMPQ itr2, itr1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD 
$0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + CMPQ R9, CX JB openAVX2Tail256LoopA + CMPQ R9, $0x0a + JNE openAVX2Tail256LoopB + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX - CMPQ itr2, $10 - JNE openAVX2Tail256LoopB - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl - - // Hash the remainder of data (if any) openAVX2Tail256Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail256HashEnd - polyAdd (0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail256Hash - -// Store 128 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail256HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail256Hash + openAVX2Tail256HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2 - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - - VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 - VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) - LEAQ (4*32)(inp), inp - LEAQ (4*32)(oup), oup - SUBQ $4*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y6 + VPERM2I128 $0x02, Y12, Y4, Y10 + VPERM2I128 $0x13, Y0, Y14, Y8 + VPERM2I128 $0x13, Y12, Y4, Y2 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR (SI), Y6, Y6 + VPXOR 32(SI), Y10, Y10 + 
VPXOR 64(SI), Y8, Y8 + VPXOR 96(SI), Y2, Y2 + VMOVDQU Y6, (DI) + VMOVDQU Y10, 32(DI) + VMOVDQU Y8, 64(DI) + VMOVDQU Y2, 96(DI) + LEAQ 128(SI), SI + LEAQ 128(DI), DI + SUBQ $0x80, BX + JMP openAVX2TailLoop + openAVX2Tail384: // Need to decrypt up to 384 bytes - prepare six blocks - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, ctr0StoreAVX2 - VMOVDQA DD1, ctr1StoreAVX2 - VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) // Compute the number of iterations that will hash two blocks of data - MOVQ inl, tmpStoreAVX2 - MOVQ inl, itr1 - SUBQ $256, itr1 - SHRQ $4, itr1 - ADDQ $6, itr1 - MOVQ $10, itr2 - CMPQ itr1, $10 - CMOVQGT itr2, itr1 - MOVQ inp, inl - XORQ itr2, itr2 - - // Perform ChaCha rounds, while hashing the remaining input + MOVQ BX, 224(BP) + MOVQ BX, CX + SUBQ $0x00000100, CX + SHRQ $0x04, CX + ADDQ $0x06, CX + MOVQ $0x0000000a, R9 + CMPQ CX, $0x0a + CMOVQGT R9, CX + MOVQ SI, BX + XORQ R9, R9 + openAVX2Tail384LoopB: - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX openAVX2Tail384LoopA: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - polyAdd(0(inl)) - polyMulAVX2 - LEAQ 16(inl), inl - INCQ itr2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - - CMPQ itr2, itr1 - JB openAVX2Tail384LoopB - - CMPQ itr2, $10 - JNE openAVX2Tail384LoopA - - MOVQ inl, itr2 - SUBQ inp, inl - MOVQ inl, itr1 - MOVQ tmpStoreAVX2, inl + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + ADDQ (BX), R10 + ADCQ 8(BX), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(BX), BX + INCQ R9 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + CMPQ R9, CX + JB openAVX2Tail384LoopB + CMPQ R9, $0x0a + JNE openAVX2Tail384LoopA + MOVQ BX, R9 + SUBQ SI, BX + MOVQ BX, CX + MOVQ 224(BP), BX openAVX2Tail384Hash: - ADDQ $16, itr1 - CMPQ itr1, inl - JGT openAVX2Tail384HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - JMP openAVX2Tail384Hash - -// Store 256 bytes safely, then go to store loop + ADDQ $0x10, CX + CMPQ CX, BX + JGT openAVX2Tail384HashEnd + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, 
R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + JMP openAVX2Tail384Hash + openAVX2Tail384HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - LEAQ (8*32)(inp), inp - LEAQ (8*32)(oup), oup - SUBQ $8*32, inl + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + LEAQ 256(SI), SI + LEAQ 256(DI), DI + SUBQ $0x00000100, BX JMP openAVX2TailLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext openAVX2Tail512: - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, 
CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - XORQ itr1, itr1 - MOVQ inp, itr2 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + XORQ CX, CX + MOVQ SI, R9 openAVX2Tail512LoopB: - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ (2*8)(itr2), itr2 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 openAVX2Tail512LoopA: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(itr2)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; 
VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(itr2)) - polyMulAVX2 - LEAQ (4*8)(itr2), itr2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - INCQ itr1 - CMPQ itr1, $4 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(R9), R10 + ADCQ 24(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(R9), R9 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + INCQ CX + CMPQ CX, $0x04 JLT openAVX2Tail512LoopB 
- - CMPQ itr1, $10 - JNE openAVX2Tail512LoopA - - MOVQ inl, itr1 - SUBQ $384, itr1 - ANDQ $-16, itr1 + CMPQ CX, $0x0a + JNE openAVX2Tail512LoopA + MOVQ BX, CX + SUBQ $0x00000180, CX + ANDQ $-16, CX openAVX2Tail512HashLoop: - TESTQ itr1, itr1 + TESTQ CX, CX JE openAVX2Tail512HashEnd - polyAdd(0(itr2)) - polyMulAVX2 - LEAQ 16(itr2), itr2 - SUBQ $16, itr1 + ADDQ (R9), R10 + ADCQ 8(R9), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(R9), R9 + SUBQ $0x10, CX JMP openAVX2Tail512HashLoop openAVX2Tail512HashEnd: - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - LEAQ (12*32)(inp), inp - LEAQ (12*32)(oup), oup - SUBQ $12*32, inl - - JMP openAVX2TailLoop - -// ---------------------------------------------------------------------------- -// ---------------------------------------------------------------------------- -// func chacha20Poly1305Seal(dst, key, src, ad []byte) -TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 - // For aligned stack access + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), 
Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + LEAQ 384(SI), SI + LEAQ 384(DI), DI + SUBQ $0x00000180, BX + JMP openAVX2TailLoop + +DATA ·chacha20Constants<>+0(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+4(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+8(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+12(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+16(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+20(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+24(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+28(SB)/4, $0x6b206574 +GLOBL ·chacha20Constants<>(SB), RODATA|NOPTR, $32 + +DATA ·polyClampMask<>+0(SB)/8, $0x0ffffffc0fffffff +DATA ·polyClampMask<>+8(SB)/8, $0x0ffffffc0ffffffc +DATA ·polyClampMask<>+16(SB)/8, $0xffffffffffffffff +DATA ·polyClampMask<>+24(SB)/8, $0xffffffffffffffff +GLOBL ·polyClampMask<>(SB), RODATA|NOPTR, $32 + +DATA ·sseIncMask<>+0(SB)/8, $0x0000000000000001 +DATA ·sseIncMask<>+8(SB)/8, $0x0000000000000000 +GLOBL ·sseIncMask<>(SB), RODATA|NOPTR, $16 + +DATA ·andMask<>+0(SB)/8, $0x00000000000000ff +DATA ·andMask<>+8(SB)/8, $0x0000000000000000 +DATA ·andMask<>+16(SB)/8, $0x000000000000ffff +DATA ·andMask<>+24(SB)/8, $0x0000000000000000 +DATA ·andMask<>+32(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+40(SB)/8, $0x0000000000000000 +DATA ·andMask<>+48(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+56(SB)/8, $0x0000000000000000 +DATA ·andMask<>+64(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+72(SB)/8, $0x0000000000000000 +DATA ·andMask<>+80(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+88(SB)/8, $0x0000000000000000 +DATA ·andMask<>+96(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+104(SB)/8, $0x0000000000000000 +DATA ·andMask<>+112(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+120(SB)/8, $0x0000000000000000 +DATA ·andMask<>+128(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+136(SB)/8, $0x00000000000000ff +DATA ·andMask<>+144(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+152(SB)/8, $0x000000000000ffff +DATA ·andMask<>+160(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+168(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+176(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+184(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+192(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+200(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+208(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+216(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+224(SB)/8, 
$0xffffffffffffffff +DATA ·andMask<>+232(SB)/8, $0x00ffffffffffffff +GLOBL ·andMask<>(SB), RODATA|NOPTR, $240 + +DATA ·avx2InitMask<>+0(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2InitMask<>+16(SB)/8, $0x0000000000000001 +DATA ·avx2InitMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2InitMask<>(SB), RODATA|NOPTR, $32 + +DATA ·rol16<>+0(SB)/8, $0x0504070601000302 +DATA ·rol16<>+8(SB)/8, $0x0d0c0f0e09080b0a +DATA ·rol16<>+16(SB)/8, $0x0504070601000302 +DATA ·rol16<>+24(SB)/8, $0x0d0c0f0e09080b0a +GLOBL ·rol16<>(SB), RODATA|NOPTR, $32 + +DATA ·rol8<>+0(SB)/8, $0x0605040702010003 +DATA ·rol8<>+8(SB)/8, $0x0e0d0c0f0a09080b +DATA ·rol8<>+16(SB)/8, $0x0605040702010003 +DATA ·rol8<>+24(SB)/8, $0x0e0d0c0f0a09080b +GLOBL ·rol8<>(SB), RODATA|NOPTR, $32 + +DATA ·avx2IncMask<>+0(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+8(SB)/8, $0x0000000000000000 +DATA ·avx2IncMask<>+16(SB)/8, $0x0000000000000002 +DATA ·avx2IncMask<>+24(SB)/8, $0x0000000000000000 +GLOBL ·avx2IncMask<>(SB), RODATA|NOPTR, $32 + +// func chacha20Poly1305Seal(dst []byte, key []uint32, src []byte, ad []byte) +// Requires: AVX, AVX2, BMI2, CMOV, SSE2 +TEXT ·chacha20Poly1305Seal(SB), $288-96 MOVQ SP, BP - ADDQ $32, BP + ADDQ $0x20, BP ANDQ $-32, BP - MOVQ dst+0(FP), oup - MOVQ key+24(FP), keyp - MOVQ src+48(FP), inp - MOVQ src_len+56(FP), inl - MOVQ ad+72(FP), adp - - CMPB ·useAVX2(SB), $1 + MOVQ dst_base+0(FP), DI + MOVQ key_base+24(FP), R8 + MOVQ src_base+48(FP), SI + MOVQ src_len+56(FP), BX + MOVQ ad_base+72(FP), CX + CMPB ·useAVX2+0(SB), $0x01 JE chacha20Poly1305Seal_AVX2 // Special optimization, for very short buffers - CMPQ inl, $128 - JBE sealSSE128 // About 15% faster + CMPQ BX, $0x80 + JBE sealSSE128 // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration - MOVOU ·chacha20Constants<>(SB), A0 - MOVOU (1*16)(keyp), B0 - MOVOU (2*16)(keyp), C0 - MOVOU (3*16)(keyp), D0 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 // Store state on stack for future use - MOVO B0, state1Store - MOVO C0, state2Store + MOVO X3, 32(BP) + MOVO X6, 48(BP) // Load state, increment counter blocks - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store - MOVQ $10, itr2 + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) + MOVQ $0x0000000a, R9 sealSSEIntroLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - shiftB0Right; shiftB1Right; 
shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JNE sealSSEIntroLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + 
PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JNE sealSSEIntroLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 // Clamp and store the key - PAND ·polyClampMask<>(SB), A0 - MOVO A0, rStore - MOVO B0, sStore + PAND ·polyClampMask<>+0(SB), X0 + MOVO X0, (BP) + MOVO X3, 16(BP) // Hash AAD - MOVQ ad_len+80(FP), itr2 - CALL polyHashADInternal<>(SB) - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) - - MOVQ $128, itr1 - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 - - CMPQ inl, $64 - JBE sealSSE128SealHash - - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) - - ADDQ $64, itr1 - SUBQ $64, inl - LEAQ 64(inp), inp - - MOVQ $2, itr1 - MOVQ $8, itr2 - - CMPQ inl, $64 - JBE sealSSETail64 - CMPQ inl, $128 - JBE sealSSETail128 - CMPQ inl, $192 - JBE sealSSETail192 + MOVQ ad_len+80(FP), R9 + CALL polyHashADInternal<>(SB) + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, (DI) + MOVOU X4, 16(DI) + MOVOU X7, 32(DI) + MOVOU X10, 48(DI) + MOVOU 64(SI), X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), 
X6 + MOVOU 112(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 64(DI) + MOVOU X5, 80(DI) + MOVOU X8, 96(DI) + MOVOU X11, 112(DI) + MOVQ $0x00000080, CX + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 + JBE sealSSE128SealHash + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 128(DI) + MOVOU X13, 144(DI) + MOVOU X14, 160(DI) + MOVOU X15, 176(DI) + ADDQ $0x40, CX + SUBQ $0x40, BX + LEAQ 64(SI), SI + MOVQ $0x00000002, CX + MOVQ $0x00000008, R9 + CMPQ BX, $0x40 + JBE sealSSETail64 + CMPQ BX, $0x80 + JBE sealSSETail128 + CMPQ BX, $0xc0 + JBE sealSSETail192 sealSSEMainLoop: // Load state, increment counter blocks - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X2, X12 + MOVO X5, X13 + MOVO X8, X14 + MOVO X11, X15 + PADDL ·sseIncMask<>+0(SB), X15 // Store counters - MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVO X9, 80(BP) + MOVO X10, 96(BP) + MOVO X11, 112(BP) + MOVO X15, 128(BP) sealSSEInnerLoop: - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyAdd(0(oup)) - shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left - shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left - shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left - polyMulStage1 - polyMulStage2 - LEAQ (2*8)(oup), oup - MOVO C3, tmpStore - chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) - MOVO tmpStore, C3 - MOVO C1, tmpStore - polyMulStage3 - chachaQR(A3, B3, C3, D3, C1) - MOVO tmpStore, C1 - polyMulReduceStage - shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right - shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right - shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right - DECQ itr2 - JGE sealSSEInnerLoop - polyAdd(0(oup)) - polyMul - LEAQ (2*8)(oup), oup - DECQ itr1 - JG sealSSEInnerLoop + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + 
MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x0c + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + LEAQ 16(DI), DI + MOVO X14, 64(BP) + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X3 + PXOR X14, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X14) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X3 + PXOR X14, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X4 + PXOR X14, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X14) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X4 + PXOR X14, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x0c, X14 + PSRLL $0x14, X5 + PXOR X14, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X14) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X14 + PSLLL $0x07, X14 + PSRLL $0x19, X5 + PXOR X14, X5 + MOVO 64(BP), X14 + MOVO X7, 64(BP) + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + PADDD X13, X12 + PXOR X12, X15 + ROL16(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x0c, X7 + PSRLL $0x14, X13 + PXOR X7, X13 + PADDD X13, X12 + PXOR X12, X15 + ROL8(X15, X7) + PADDD X15, X14 + PXOR X14, X13 + MOVO X13, X7 + PSLLL $0x07, X7 + PSRLL $0x19, X13 + PXOR X7, X13 + MOVO 64(BP), X7 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + 
BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x04 + DECQ R9 + JGE sealSSEInnerLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSEInnerLoop // Add in the state - PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 - PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 - PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 - PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 - MOVO D3, tmpStore + PADDD ·chacha20Constants<>+0(SB), X0 + PADDD ·chacha20Constants<>+0(SB), X1 + PADDD ·chacha20Constants<>+0(SB), X2 + PADDD ·chacha20Constants<>+0(SB), X12 + PADDD 32(BP), X3 + PADDD 32(BP), X4 + PADDD 32(BP), X5 + PADDD 32(BP), X13 + PADDD 48(BP), X6 + PADDD 48(BP), X7 + PADDD 48(BP), X8 + PADDD 48(BP), X14 + PADDD 80(BP), X9 + PADDD 96(BP), X10 + PADDD 112(BP), X11 + PADDD 128(BP), X15 + MOVO X15, 64(BP) // Load - xor - store - MOVOU (0*16)(inp), D3; PXOR D3, A0 - MOVOU (1*16)(inp), D3; PXOR D3, B0 - MOVOU (2*16)(inp), D3; PXOR D3, C0 - MOVOU (3*16)(inp), D3; PXOR D3, D0 - MOVOU A0, (0*16)(oup) - MOVOU B0, (1*16)(oup) - MOVOU C0, (2*16)(oup) - MOVOU D0, (3*16)(oup) - MOVO tmpStore, D3 - - MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 - PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 - PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 - MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) - ADDQ $192, inp - MOVQ $192, itr1 - SUBQ $192, inl - MOVO A3, A1 - MOVO B3, B1 - MOVO C3, C1 - MOVO D3, D1 - CMPQ inl, $64 + MOVOU (SI), X15 + PXOR X15, X0 + MOVOU 16(SI), X15 + PXOR X15, X3 + MOVOU 32(SI), X15 + PXOR X15, X6 + MOVOU 48(SI), X15 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVO 64(BP), X15 + MOVOU 64(SI), 
X0 + MOVOU 80(SI), X3 + MOVOU 96(SI), X6 + MOVOU 112(SI), X9 + PXOR X0, X1 + PXOR X3, X4 + PXOR X6, X7 + PXOR X9, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVOU 128(SI), X0 + MOVOU 144(SI), X3 + MOVOU 160(SI), X6 + MOVOU 176(SI), X9 + PXOR X0, X2 + PXOR X3, X5 + PXOR X6, X8 + PXOR X9, X11 + MOVOU X2, 128(DI) + MOVOU X5, 144(DI) + MOVOU X8, 160(DI) + MOVOU X11, 176(DI) + ADDQ $0xc0, SI + MOVQ $0x000000c0, CX + SUBQ $0xc0, BX + MOVO X12, X1 + MOVO X13, X4 + MOVO X14, X7 + MOVO X15, X10 + CMPQ BX, $0x40 JBE sealSSE128SealHash - MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 - PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 - MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) - LEAQ 64(inp), inp - SUBQ $64, inl - MOVQ $6, itr1 - MOVQ $4, itr2 - CMPQ inl, $192 + MOVOU (SI), X0 + MOVOU 16(SI), X3 + MOVOU 32(SI), X6 + MOVOU 48(SI), X9 + PXOR X0, X12 + PXOR X3, X13 + PXOR X6, X14 + PXOR X9, X15 + MOVOU X12, 192(DI) + MOVOU X13, 208(DI) + MOVOU X14, 224(DI) + MOVOU X15, 240(DI) + LEAQ 64(SI), SI + SUBQ $0x40, BX + MOVQ $0x00000006, CX + MOVQ $0x00000004, R9 + CMPQ BX, $0xc0 JG sealSSEMainLoop - - MOVQ inl, itr1 - TESTQ inl, inl + MOVQ BX, CX + TESTQ BX, BX JE sealSSE128SealHash - MOVQ $6, itr1 - CMPQ inl, $64 + MOVQ $0x00000006, CX + CMPQ BX, $0x40 JBE sealSSETail64 - CMPQ inl, $128 + CMPQ BX, $0x80 JBE sealSSETail128 JMP sealSSETail192 -// ---------------------------------------------------------------------------- -// Special optimization for the last 64 bytes of plaintext sealSSETail64: - // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A1 - MOVO state1Store, B1 - MOVO state2Store, C1 - MOVO ctr3Store, D1 - PADDL ·sseIncMask<>(SB), D1 - MOVO D1, ctr0Store + MOVO ·chacha20Constants<>+0(SB), X1 + MOVO 32(BP), X4 + MOVO 48(BP), X7 + MOVO 128(BP), X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 80(BP) sealSSETail64LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail64LoopB: - chachaQR(A1, B1, C1, D1, T1) - shiftB1Left; shiftC1Left; shiftD1Left - chachaQR(A1, B1, C1, D1, T1) - shiftB1Right; shiftC1Right; shiftD1Right - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - DECQ itr1 - JG sealSSETail64LoopA - - DECQ itr2 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 
+ BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x0c, X13 + PSRLL $0x14, X4 + PXOR X13, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X13) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X13 + PSLLL $0x07, X13 + PSRLL $0x19, X4 + PXOR X13, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + DECQ CX + JG sealSSETail64LoopA + DECQ R9 JGE sealSSETail64LoopB - PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B1 - PADDL state2Store, C1 - PADDL ctr0Store, D1 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X4 + PADDL 48(BP), X7 + PADDL 80(BP), X10 + JMP sealSSE128Seal - JMP sealSSE128Seal - -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of plaintext sealSSETail128: - // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) sealSSETail128LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail128LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - - DECQ itr1 - JG sealSSETail128LoopA - - DECQ itr2 
- JGE sealSSETail128LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 - PADDL state1Store, B0; PADDL state1Store, B1 - PADDL state2Store, C0; PADDL state2Store, C1 - PADDL ctr0Store, D0; PADDL ctr1Store, D1 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - - MOVQ $64, itr1 - LEAQ 64(inp), inp - SUBQ $64, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 192 bytes of plaintext + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + DECQ CX + JG sealSSETail128LoopA + DECQ R9 + JGE 
sealSSETail128LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVQ $0x00000040, CX + LEAQ 64(SI), SI + SUBQ $0x40, BX + JMP sealSSE128SealHash + sealSSETail192: - // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes - MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + MOVO ·chacha20Constants<>+0(SB), X0 + MOVO 32(BP), X3 + MOVO 48(BP), X6 + MOVO 128(BP), X9 + PADDL ·sseIncMask<>+0(SB), X9 + MOVO X9, 80(BP) + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X10, 96(BP) + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X11, 112(BP) sealSSETail192LoopA: - // Perform ChaCha rounds, while hashing the previously encrypted ciphertext - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealSSETail192LoopB: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftC0Left; shiftD0Left - shiftB1Left; shiftC1Left; shiftD1Left - shiftB2Left; shiftC2Left; shiftD2Left - - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup - - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftC0Right; shiftD0Right - shiftB1Right; shiftC1Right; shiftD1Right - shiftB2Right; shiftC2Right; shiftD2Right - - DECQ itr1 - JG sealSSETail192LoopA - - DECQ itr2 - JGE sealSSETail192LoopB - - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 - PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 - PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 - - MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 - PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 - MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) - MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 - PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 - MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) - - MOVO A2, A1 - MOVO B2, B1 - MOVO C2, 
C1 - MOVO D2, D1 - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - - JMP sealSSE128SealHash - -// ---------------------------------------------------------------------------- -// Special seal optimization for buffers smaller than 129 bytes + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE 
$0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ CX + JG sealSSETail192LoopA + DECQ R9 + JGE sealSSETail192LoopB + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL 32(BP), X3 + PADDL 32(BP), X4 + PADDL 32(BP), X5 + PADDL 48(BP), X6 + PADDL 48(BP), X7 + PADDL 48(BP), X8 + PADDL 80(BP), X9 + PADDL 96(BP), X10 + PADDL 112(BP), X11 + MOVOU (SI), X12 + MOVOU 16(SI), X13 + MOVOU 32(SI), X14 + MOVOU 48(SI), X15 + PXOR X12, X0 + PXOR X13, X3 + PXOR X14, X6 + PXOR X15, X9 + MOVOU X0, (DI) + MOVOU X3, 16(DI) + MOVOU X6, 32(DI) + MOVOU X9, 48(DI) + MOVOU 64(SI), X12 + MOVOU 80(SI), X13 + MOVOU 96(SI), X14 + MOVOU 112(SI), X15 + PXOR X12, X1 + PXOR X13, X4 + PXOR X14, X7 + PXOR X15, X10 + MOVOU X1, 64(DI) + MOVOU X4, 80(DI) + MOVOU X7, 96(DI) + MOVOU X10, 112(DI) + MOVO X2, X1 + MOVO X5, X4 + MOVO X8, X7 + MOVO X11, X10 + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + JMP sealSSE128SealHash + sealSSE128: - // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks - MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 - MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 - MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 - MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 - MOVQ $10, itr2 + MOVOU ·chacha20Constants<>+0(SB), X0 + MOVOU 16(R8), X3 + MOVOU 32(R8), X6 + MOVOU 48(R8), X9 + MOVO X0, X1 + MOVO X3, X4 + MOVO X6, X7 + MOVO X9, X10 + PADDL ·sseIncMask<>+0(SB), X10 + MOVO X1, X2 + MOVO X4, X5 + MOVO X7, X8 + MOVO X10, X11 + PADDL ·sseIncMask<>+0(SB), X11 + MOVO X3, X13 + MOVO X6, X14 + MOVO X10, X15 + MOVQ $0x0000000a, R9 sealSSE128InnerCipherLoop: - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Left; shiftB1Left; shiftB2Left - shiftC0Left; shiftC1Left; shiftC2Left - shiftD0Left; shiftD1Left; shiftD2Left - chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) - shiftB0Right; shiftB1Right; shiftB2Right - shiftC0Right; shiftC1Right; shiftC2Right - shiftD0Right; shiftD1Right; shiftD2Right - DECQ itr2 - JNE sealSSE128InnerCipherLoop + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, 
X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x04 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x0c + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + PADDD X3, X0 + PXOR X0, X9 + ROL16(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X3 + PXOR X12, X3 + PADDD X3, X0 + PXOR X0, X9 + ROL8(X9, X12) + PADDD X9, X6 + PXOR X6, X3 + MOVO X3, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X3 + PXOR X12, X3 + PADDD X4, X1 + PXOR X1, X10 + ROL16(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X4 + PXOR X12, X4 + PADDD X4, X1 + PXOR X1, X10 + ROL8(X10, X12) + PADDD X10, X7 + PXOR X7, X4 + MOVO X4, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X4 + PXOR X12, X4 + PADDD X5, X2 + PXOR X2, X11 + ROL16(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x0c, X12 + PSRLL $0x14, X5 + PXOR X12, X5 + PADDD X5, X2 + PXOR X2, X11 + ROL8(X11, X12) + PADDD X11, X8 + PXOR X8, X5 + MOVO X5, X12 + PSLLL $0x07, X12 + PSRLL $0x19, X5 + PXOR X12, X5 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xe4 + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xed + BYTE $0x0c + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xf6 + BYTE $0x08 + BYTE $0x66 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xff + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc0 + BYTE $0x08 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xc9 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xd2 + BYTE $0x04 + BYTE $0x66 + BYTE $0x45 + BYTE $0x0f + BYTE $0x3a + BYTE $0x0f + BYTE $0xdb + BYTE $0x04 + DECQ R9 + JNE sealSSE128InnerCipherLoop // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded - PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 - PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 - PADDL T2, C1; PADDL T2, C2 - PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 - PAND ·polyClampMask<>(SB), A0 - MOVOU A0, rStore - MOVOU B0, sStore + PADDL ·chacha20Constants<>+0(SB), X0 + PADDL ·chacha20Constants<>+0(SB), X1 + PADDL ·chacha20Constants<>+0(SB), X2 + PADDL X13, X3 + PADDL X13, X4 + PADDL X13, X5 + PADDL X14, X7 + PADDL X14, X8 + PADDL X15, X10 + PADDL ·sseIncMask<>+0(SB), X15 + PADDL X15, X11 + PAND ·polyClampMask<>+0(SB), X0 + MOVOU X0, (BP) + MOVOU X3, 16(BP) // Hash - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealSSE128SealHash: - // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealSSE128Seal - polyAdd(0(oup)) - polyMul - - SUBQ $16, itr1 - ADDQ $16, 
oup - - JMP sealSSE128SealHash + CMPQ CX, $0x10 + JB sealSSE128Seal + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealSSE128SealHash sealSSE128Seal: - CMPQ inl, $16 + CMPQ BX, $0x10 JB sealSSETail - SUBQ $16, inl + SUBQ $0x10, BX // Load for decryption - MOVOU (inp), T0 - PXOR T0, A1 - MOVOU A1, (oup) - LEAQ (1*16)(inp), inp - LEAQ (1*16)(oup), oup + MOVOU (SI), X12 + PXOR X12, X1 + MOVOU X1, (DI) + LEAQ 16(SI), SI + LEAQ 16(DI), DI // Extract for hashing - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Shift the stream "left" - MOVO B1, A1 - MOVO C1, B1 - MOVO D1, C1 - MOVO A2, D1 - MOVO B2, A2 - MOVO C2, B2 - MOVO D2, C2 + MOVO X4, X1 + MOVO X7, X4 + MOVO X10, X7 + MOVO X2, X10 + MOVO X5, X2 + MOVO X8, X5 + MOVO X11, X8 JMP sealSSE128Seal sealSSETail: - TESTQ inl, inl + TESTQ BX, BX JE sealSSEFinalize // We can only load the PT one byte at a time to avoid read after end of buffer - MOVQ inl, itr2 - SHLQ $4, itr2 - LEAQ ·andMask<>(SB), t0 - MOVQ inl, itr1 - LEAQ -1(inp)(inl*1), inp - XORQ t2, t2 - XORQ t3, t3 + MOVQ BX, R9 + SHLQ $0x04, R9 + LEAQ ·andMask<>+0(SB), R13 + MOVQ BX, CX + LEAQ -1(SI)(BX*1), SI + XORQ R15, R15 + XORQ R8, R8 XORQ AX, AX sealSSETailLoadLoop: - SHLQ $8, t2, t3 - SHLQ $8, t2 - MOVB (inp), AX - XORQ AX, t2 - LEAQ -1(inp), inp - DECQ itr1 + SHLQ $0x08, R15, R8 + SHLQ $0x08, R15 + MOVB (SI), AX + XORQ AX, R15 + LEAQ -1(SI), SI + DECQ CX JNE sealSSETailLoadLoop - MOVQ t2, 0+tmpStore - MOVQ t3, 8+tmpStore - PXOR 0+tmpStore, A1 - MOVOU A1, (oup) - MOVOU -16(t0)(itr2*1), T0 - PAND T0, A1 - MOVQ A1, t0 - PSRLDQ $8, A1 - MOVQ A1, t1 - ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 - polyMul - - ADDQ inl, oup + MOVQ R15, 64(BP) + MOVQ R8, 72(BP) + PXOR 64(BP), X1 + MOVOU X1, (DI) + MOVOU -16(R13)(R9*1), X12 + PAND X12, X1 + MOVQ X1, R13 + PSRLDQ $0x08, X1 + MOVQ X1, R14 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, 
R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ BX, DI sealSSEFinalize: // Hash in the buffer lengths - ADDQ ad_len+80(FP), acc0 - ADCQ src_len+56(FP), acc1 - ADCQ $1, acc2 - polyMul + ADDQ ad_len+80(FP), R10 + ADCQ src_len+56(FP), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 // Final reduce - MOVQ acc0, t0 - MOVQ acc1, t1 - MOVQ acc2, t2 - SUBQ $-5, acc0 - SBBQ $-1, acc1 - SBBQ $3, acc2 - CMOVQCS t0, acc0 - CMOVQCS t1, acc1 - CMOVQCS t2, acc2 + MOVQ R10, R13 + MOVQ R11, R14 + MOVQ R12, R15 + SUBQ $-5, R10 + SBBQ $-1, R11 + SBBQ $0x03, R12 + CMOVQCS R13, R10 + CMOVQCS R14, R11 + CMOVQCS R15, R12 // Add in the "s" part of the key - ADDQ 0+sStore, acc0 - ADCQ 8+sStore, acc1 + ADDQ 16(BP), R10 + ADCQ 24(BP), R11 // Finally store the tag at the end of the message - MOVQ acc0, (0*8)(oup) - MOVQ acc1, (1*8)(oup) + MOVQ R10, (DI) + MOVQ R11, 8(DI) RET -// ---------------------------------------------------------------------------- -// ------------------------- AVX2 Code ---------------------------------------- chacha20Poly1305Seal_AVX2: VZEROUPPER - VMOVDQU ·chacha20Constants<>(SB), AA0 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 - BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 - BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 - VPADDD ·avx2InitMask<>(SB), DD0, DD0 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x70 + BYTE $0x10 + BYTE $0xc4 + BYTE $0x42 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x20 + BYTE $0xc4 + BYTE $0xc2 + BYTE $0x7d + BYTE $0x5a + BYTE $0x60 + BYTE $0x30 + VPADDD ·avx2InitMask<>+0(SB), Y4, Y4 // Special optimizations, for very short buffers - CMPQ inl, $192 - JBE seal192AVX2 // 33% faster - CMPQ inl, $320 - JBE seal320AVX2 // 17% faster + CMPQ BX, $0x000000c0 + JBE seal192AVX2 + CMPQ BX, $0x00000140 + JBE seal320AVX2 // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream - VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 - VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 - VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 - VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA Y14, 32(BP) + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + 
VMOVDQA Y12, 64(BP) + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, 96(BP) + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y1, 128(BP) + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, R9 sealAVX2IntroLoop: - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr2 - JNE sealAVX2IntroLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - - VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 - VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key - VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD 
Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ R9 + JNE sealAVX2IntroLoop + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPERM2I128 $0x02, Y0, Y14, Y4 + VPERM2I128 $0x13, Y0, Y14, Y0 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), DD0, DD0 - VMOVDQA DD0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, (BP) // Hash AD - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) // Can store at least 320 bytes - VPXOR (0*32)(inp), AA0, AA0 - VPXOR 
(1*32)(inp), CC0, CC0 - VMOVDQU AA0, (0*32)(oup) - VMOVDQU CC0, (1*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 - VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 - VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) - - MOVQ $320, itr1 - SUBQ $320, inl - LEAQ 320(inp), inp - - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 - CMPQ inl, $128 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y12, Y12 + VMOVDQU Y0, (DI) + VMOVDQU Y12, 32(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 64(SI), Y0, Y0 + VPXOR 96(SI), Y14, Y14 + VPXOR 128(SI), Y12, Y12 + VPXOR 160(SI), Y4, Y4 + VMOVDQU Y0, 64(DI) + VMOVDQU Y14, 96(DI) + VMOVDQU Y12, 128(DI) + VMOVDQU Y4, 160(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 192(SI), Y0, Y0 + VPXOR 224(SI), Y14, Y14 + VPXOR 256(SI), Y12, Y12 + VPXOR 288(SI), Y4, Y4 + VMOVDQU Y0, 192(DI) + VMOVDQU Y14, 224(DI) + VMOVDQU Y12, 256(DI) + VMOVDQU Y4, 288(DI) + MOVQ $0x00000140, CX + SUBQ $0x00000140, BX + LEAQ 320(SI), SI + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, Y15, Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, Y15, Y3, Y4 + CMPQ BX, $0x80 JBE sealAVX2SealHash - - VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 - VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) - SUBQ $128, inl - LEAQ 128(inp), inp - - MOVQ $8, itr1 - MOVQ $2, itr2 - - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - CMPQ inl, $512 - JBE sealAVX2Tail512 + VPXOR (SI), Y0, Y0 + VPXOR 32(SI), Y14, Y14 + VPXOR 64(SI), Y12, Y12 + VPXOR 96(SI), Y4, Y4 + VMOVDQU Y0, 320(DI) + VMOVDQU Y14, 352(DI) + VMOVDQU Y12, 384(DI) + VMOVDQU Y4, 416(DI) + SUBQ $0x80, BX + LEAQ 128(SI), SI + MOVQ $0x00000008, CX + MOVQ $0x00000002, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + CMPQ BX, $0x00000200 + JBE sealAVX2Tail512 // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - - VMOVDQA CC3, tmpStoreAVX2 - 
chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 - VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 - VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 - VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 - - VMOVDQA CC3, tmpStoreAVX2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) - VMOVDQA tmpStoreAVX2, CC3 - VMOVDQA CC1, tmpStoreAVX2 - chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) - VMOVDQA tmpStoreAVX2, CC1 - - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 - VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 - VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 - VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - - SUBQ $16, oup // Adjust the pointer - MOVQ $9, itr1 - JMP sealAVX2InternalLoopStart + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + 
VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y3, Y3, Y3 + VMOVDQA Y15, 224(BP) + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VMOVDQA 224(BP), Y15 + VMOVDQA Y13, 224(BP) + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x0c, Y11, Y13 + VPSRLD $0x14, Y11, Y11 + VPXOR Y13, Y11, Y11 + VPADDD Y11, Y7, Y7 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y3, Y15, Y15 + VPXOR Y15, Y11, Y11 + VPSLLD $0x07, Y11, Y13 + VPSRLD $0x19, Y11, Y11 + VPXOR Y13, Y11, Y11 + VMOVDQA 224(BP), Y13 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR 
Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + SUBQ $0x10, DI + MOVQ $0x00000009, CX + JMP sealAVX2InternalLoopStart sealAVX2MainLoop: - // Load state, increment counter blocks, store the incremented counters - VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 - MOVQ $10, itr1 + VMOVDQU ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) + MOVQ $0x0000000a, CX sealAVX2InternalLoop: - polyAdd(0*8(oup)) - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage1_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulStage2_AVX2 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyMulStage3_AVX2 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + 
MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 sealAVX2InternalLoopStart: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - polyAdd(2*8(oup)) - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage1_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage2_AVX2 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - polyMulStage3_AVX2 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - polyMulReduceStage - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(4*8(oup)) - LEAQ (6*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulStage1_AVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - polyMulStage2_AVX2 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - polyMulStage3_AVX2 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyMulReduceStage - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - DECQ itr1 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 
+ VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 32(DI), R10 + ADCQ 40(DI), R11 + ADCQ $0x01, R12 + LEAQ 48(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + 
VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX JNE sealAVX2InternalLoop - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) // We only hashed 480 of the 512 bytes available - hash the remaining 32 here - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 - VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 - VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPERM2I128 $0x02, Y0, Y14, Y15 + VPERM2I128 $0x13, Y0, Y14, Y14 + VPERM2I128 $0x02, Y12, Y4, Y0 + VPERM2I128 $0x13, Y12, Y4, Y12 + VPXOR (SI), Y15, Y15 + VPXOR 32(SI), Y0, Y0 + VPXOR 64(SI), Y14, Y14 + VPXOR 96(SI), Y12, Y12 + VMOVDQU Y15, (DI) + VMOVDQU Y0, 32(DI) + VMOVDQU Y14, 
64(DI) + VMOVDQU Y12, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) // and here - polyAdd(-2*8(oup)) - polyMulAVX2 - VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 - VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) - LEAQ (32*16)(inp), inp - SUBQ $(32*16), inl - CMPQ inl, $512 + ADDQ -16(DI), R10 + ADCQ -8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + VPXOR 384(SI), Y0, Y0 + VPXOR 416(SI), Y14, Y14 + VPXOR 448(SI), Y12, Y12 + VPXOR 480(SI), Y4, Y4 + VMOVDQU Y0, 384(DI) + VMOVDQU Y14, 416(DI) + VMOVDQU Y12, 448(DI) + VMOVDQU Y4, 480(DI) + LEAQ 512(SI), SI + SUBQ $0x00000200, BX + CMPQ BX, $0x00000200 JG sealAVX2MainLoop // Tail can only hash 480 bytes - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ 32(oup), oup - - MOVQ $10, itr1 - MOVQ $0, itr2 - CMPQ inl, $128 - JBE sealAVX2Tail128 - CMPQ inl, $256 - JBE sealAVX2Tail256 - CMPQ inl, $384 - JBE sealAVX2Tail384 - JMP sealAVX2Tail512 - -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 193 bytes + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 
24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + MOVQ $0x0000000a, CX + MOVQ $0x00000000, R9 + CMPQ BX, $0x80 + JBE sealAVX2Tail128 + CMPQ BX, $0x00000100 + JBE sealAVX2Tail256 + CMPQ BX, $0x00000180 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + seal192AVX2: - // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks - VMOVDQA AA0, AA1 - VMOVDQA BB0, BB1 - VMOVDQA CC0, CC1 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2 - VMOVDQA BB0, BB2 - VMOVDQA CC0, CC2 - VMOVDQA DD0, DD2 - VMOVDQA DD1, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VMOVDQA Y4, Y2 + VMOVDQA Y1, Y15 + MOVQ $0x0000000a, R9 sealAVX2192InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR 
$0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ R9 JNE sealAVX2192InnerCipherLoop - VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 - VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 - VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 - VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 + VPADDD Y6, Y0, Y0 + VPADDD Y6, Y5, Y5 + VPADDD Y10, Y14, Y14 + VPADDD Y10, Y9, Y9 + VPADDD Y8, Y12, Y12 + VPADDD Y8, Y13, Y13 + VPADDD Y2, Y4, Y4 + VPADDD Y15, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 // Clamp and store poly key - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 192 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 sealAVX2ShortSeal: // Hash aad - MOVQ ad_len+80(FP), itr2 + MOVQ ad_len+80(FP), R9 CALL polyHashADInternal<>(SB) - XORQ itr1, itr1 + XORQ CX, CX sealAVX2SealHash: // itr1 holds the number of bytes encrypted but not yet hashed - CMPQ itr1, $16 - JB sealAVX2ShortSealLoop - polyAdd(0(oup)) - polyMul - SUBQ $16, itr1 - ADDQ $16, oup - JMP sealAVX2SealHash + CMPQ CX, $0x10 + JB sealAVX2ShortSealLoop + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + SUBQ $0x10, CX + ADDQ $0x10, DI + JMP sealAVX2SealHash sealAVX2ShortSealLoop: - CMPQ inl, $32 + CMPQ BX, $0x20 JB sealAVX2ShortTail32 - SUBQ $32, inl + SUBQ $0x20, BX // Load for encryption - VPXOR (inp), AA0, AA0 - VMOVDQU AA0, (oup) - LEAQ (1*32)(inp), inp + VPXOR (SI), Y0, Y0 + VMOVDQU Y0, (DI) + LEAQ 32(SI), SI // Now can hash - polyAdd(0*8(oup)) - polyMulAVX2 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (1*32)(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + 
MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI // Shift stream left - VMOVDQA BB0, AA0 - VMOVDQA CC0, BB0 - VMOVDQA DD0, CC0 - VMOVDQA AA1, DD0 - VMOVDQA BB1, AA1 - VMOVDQA CC1, BB1 - VMOVDQA DD1, CC1 - VMOVDQA AA2, DD1 - VMOVDQA BB2, AA2 + VMOVDQA Y14, Y0 + VMOVDQA Y12, Y14 + VMOVDQA Y4, Y12 + VMOVDQA Y5, Y4 + VMOVDQA Y9, Y5 + VMOVDQA Y13, Y9 + VMOVDQA Y1, Y13 + VMOVDQA Y6, Y1 + VMOVDQA Y10, Y6 JMP sealAVX2ShortSealLoop sealAVX2ShortTail32: - CMPQ inl, $16 - VMOVDQA A0, A1 + CMPQ BX, $0x10 + VMOVDQA X0, X1 JB sealAVX2ShortDone - - SUBQ $16, inl + SUBQ $0x10, BX // Load for encryption - VPXOR (inp), A0, T0 - VMOVDQU T0, (oup) - LEAQ (1*16)(inp), inp + VPXOR (SI), X0, X12 + VMOVDQU X12, (DI) + LEAQ 16(SI), SI // Hash - polyAdd(0*8(oup)) - polyMulAVX2 - LEAQ (1*16)(oup), oup - VPERM2I128 $0x11, AA0, AA0, AA0 - VMOVDQA A0, A1 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI + VPERM2I128 $0x11, Y0, Y0, Y0 + VMOVDQA X0, X1 sealAVX2ShortDone: VZEROUPPER JMP sealSSETail -// ---------------------------------------------------------------------------- -// Special optimization for buffers smaller than 321 bytes seal320AVX2: - // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks - VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 - MOVQ $10, itr2 + VMOVDQA Y0, Y5 + VMOVDQA Y14, Y9 + VMOVDQA Y12, Y13 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y0, Y6 + VMOVDQA Y14, Y10 + VMOVDQA Y12, Y8 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y14, Y7 + VMOVDQA Y12, Y11 + VMOVDQA Y4, Y15 + MOVQ $0x0000000a, R9 sealAVX2320InnerCipherLoop: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 
+ VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ R9 JNE sealAVX2320InnerCipherLoop - - VMOVDQA ·chacha20Constants<>(SB), TT0 - VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 - VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 - VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 - VMOVDQA ·avx2IncMask<>(SB), TT0 - VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 - VPADDD TT3, DD2, DD2 + VMOVDQA ·chacha20Constants<>+0(SB), Y3 + VPADDD Y3, Y0, Y0 + VPADDD Y3, Y5, Y5 + VPADDD Y3, Y6, Y6 + VPADDD Y7, Y14, Y14 + VPADDD Y7, Y9, Y9 + VPADDD Y7, Y10, Y10 + VPADDD Y11, Y12, Y12 + VPADDD Y11, Y13, Y13 + VPADDD Y11, Y8, Y8 + VMOVDQA ·avx2IncMask<>+0(SB), Y3 + VPADDD Y15, Y4, Y4 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y1, Y1 + VPADDD Y3, Y15, Y15 + VPADDD Y15, Y2, Y2 // Clamp and store poly key - VPERM2I128 $0x02, AA0, BB0, TT0 - VPAND ·polyClampMask<>(SB), TT0, TT0 - VMOVDQA TT0, rsStoreAVX2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPAND ·polyClampMask<>+0(SB), Y3, Y3 + VMOVDQA Y3, (BP) // Stream for up to 320 bytes - VPERM2I128 $0x13, AA0, BB0, AA0 - 
VPERM2I128 $0x13, CC0, DD0, BB0 - VPERM2I128 $0x02, AA1, BB1, CC0 - VPERM2I128 $0x02, CC1, DD1, DD0 - VPERM2I128 $0x13, AA1, BB1, AA1 - VPERM2I128 $0x13, CC1, DD1, BB1 - VPERM2I128 $0x02, AA2, BB2, CC1 - VPERM2I128 $0x02, CC2, DD2, DD1 - VPERM2I128 $0x13, AA2, BB2, AA2 - VPERM2I128 $0x13, CC2, DD2, BB2 + VPERM2I128 $0x13, Y0, Y14, Y0 + VPERM2I128 $0x13, Y12, Y4, Y14 + VPERM2I128 $0x02, Y5, Y9, Y12 + VPERM2I128 $0x02, Y13, Y1, Y4 + VPERM2I128 $0x13, Y5, Y9, Y5 + VPERM2I128 $0x13, Y13, Y1, Y9 + VPERM2I128 $0x02, Y6, Y10, Y13 + VPERM2I128 $0x02, Y8, Y2, Y1 + VPERM2I128 $0x13, Y6, Y10, Y6 + VPERM2I128 $0x13, Y8, Y2, Y10 JMP sealAVX2ShortSeal -// ---------------------------------------------------------------------------- -// Special optimization for the last 128 bytes of ciphertext sealAVX2Tail128: - // Need to decrypt up to 128 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0 - VMOVDQA state1StoreAVX2, BB0 - VMOVDQA state2StoreAVX2, CC0 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VMOVDQA DD0, DD1 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA 32(BP), Y14 + VMOVDQA 64(BP), Y12 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VMOVDQA Y4, Y1 sealAVX2Tail128LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail128LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $12, DD0, DD0, DD0 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0 - VPALIGNR $8, CC0, CC0, CC0 - VPALIGNR $4, DD0, DD0, DD0 - DECQ itr1 - JG sealAVX2Tail128LoopA - DECQ itr2 - JGE sealAVX2Tail128LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA1 - VPADDD state1StoreAVX2, BB0, BB1 - VPADDD state2StoreAVX2, CC0, CC1 - VPADDD DD1, DD0, DD1 - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + 
ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x04, Y4, Y4, Y4 + DECQ CX + JG sealAVX2Tail128LoopA + DECQ R9 + JGE sealAVX2Tail128LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y5 + VPADDD 32(BP), Y14, Y9 + VPADDD 64(BP), Y12, Y13 + VPADDD Y1, Y4, Y1 + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 JMP sealAVX2ShortSealLoop -// ---------------------------------------------------------------------------- -// Special optimization for the last 256 bytes of ciphertext sealAVX2Tail256: - // Need to decrypt up to 256 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD1 - VMOVDQA DD0, TT1 - VMOVDQA DD1, TT2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA ·chacha20Constants<>+0(SB), Y5 + VMOVDQA 32(BP), Y14 + VMOVDQA 32(BP), Y9 + VMOVDQA 64(BP), Y12 + VMOVDQA 64(BP), Y13 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 sealAVX2Tail256LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ 
R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail256LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 - DECQ itr1 - JG sealAVX2Tail256LoopA - DECQ itr2 - JGE sealAVX2Tail256LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - MOVQ $128, itr1 - LEAQ 128(inp), inp - SUBQ $128, inl - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 384 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + 
VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + DECQ CX + JG sealAVX2Tail256LoopA + DECQ R9 + JGE sealAVX2Tail256LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + MOVQ $0x00000080, CX + LEAQ 128(SI), SI + SUBQ $0x80, BX + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + JMP sealAVX2SealHash + sealAVX2Tail384: - // Need to decrypt up to 384 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 - VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VMOVDQA Y4, Y7 + VMOVDQA Y1, Y11 + VMOVDQA Y2, Y15 sealAVX2Tail384LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 
+ ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail384LoopB: - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(0(oup)) - polyMul - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 - chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) - polyAdd(16(oup)) - polyMul - LEAQ 32(oup), oup - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 - DECQ itr1 - JG sealAVX2Tail384LoopA - DECQ itr2 - JGE sealAVX2Tail384LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 - VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 - VPERM2I128 $0x02, AA0, BB0, TT0 - VPERM2I128 $0x02, CC0, DD0, TT1 - VPERM2I128 $0x13, AA0, BB0, TT2 - VPERM2I128 $0x13, CC0, DD0, TT3 - VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 - VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) - VPERM2I128 $0x02, AA1, BB1, TT0 - VPERM2I128 $0x02, CC1, DD1, TT1 - VPERM2I128 $0x13, AA1, BB1, TT2 - VPERM2I128 $0x13, CC1, DD1, TT3 - VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 - VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) - MOVQ $256, itr1 - LEAQ 256(inp), inp - SUBQ $256, inl - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - - JMP sealAVX2SealHash - -// ---------------------------------------------------------------------------- -// Special optimization for the last 512 bytes of ciphertext + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + 
VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x0c, Y14, Y3 + VPSRLD $0x14, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y14, Y0, Y0 + VPXOR Y0, Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPADDD Y4, Y12, Y12 + VPXOR Y12, Y14, Y14 + VPSLLD $0x07, Y14, Y3 + VPSRLD $0x19, Y14, Y14 + VPXOR Y3, Y14, Y14 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x0c, Y9, Y3 + VPSRLD $0x14, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y9, Y5, Y5 + VPXOR Y5, Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPADDD Y1, Y13, Y13 + VPXOR Y13, Y9, Y9 + VPSLLD $0x07, Y9, Y3 + VPSRLD $0x19, Y9, Y9 + VPXOR Y3, Y9, Y9 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x0c, Y10, Y3 + VPSRLD $0x14, Y10, Y10 + VPXOR Y3, Y10, Y10 + VPADDD Y10, Y6, Y6 + VPXOR Y6, Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPADDD Y2, Y8, Y8 + VPXOR Y8, Y10, Y10 + VPSLLD $0x07, Y10, Y3 + VPSRLD $0x19, Y10, Y10 + VPXOR Y3, Y10, Y10 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + DECQ CX + JG sealAVX2Tail384LoopA + DECQ R9 + JGE sealAVX2Tail384LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD 
32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD Y7, Y4, Y4 + VPADDD Y11, Y1, Y1 + VPADDD Y15, Y2, Y2 + VPERM2I128 $0x02, Y0, Y14, Y3 + VPERM2I128 $0x02, Y12, Y4, Y7 + VPERM2I128 $0x13, Y0, Y14, Y11 + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR (SI), Y3, Y3 + VPXOR 32(SI), Y7, Y7 + VPXOR 64(SI), Y11, Y11 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y3, (DI) + VMOVDQU Y7, 32(DI) + VMOVDQU Y11, 64(DI) + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y3 + VPERM2I128 $0x02, Y13, Y1, Y7 + VPERM2I128 $0x13, Y5, Y9, Y11 + VPERM2I128 $0x13, Y13, Y1, Y15 + VPXOR 128(SI), Y3, Y3 + VPXOR 160(SI), Y7, Y7 + VPXOR 192(SI), Y11, Y11 + VPXOR 224(SI), Y15, Y15 + VMOVDQU Y3, 128(DI) + VMOVDQU Y7, 160(DI) + VMOVDQU Y11, 192(DI) + VMOVDQU Y15, 224(DI) + MOVQ $0x00000100, CX + LEAQ 256(SI), SI + SUBQ $0x00000100, BX + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + JMP sealAVX2SealHash + sealAVX2Tail512: - // Need to decrypt up to 512 bytes - prepare two blocks - // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed - // If we got here before the main loop - there are 448 encrpyred bytes waiting to be hashed - VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 - VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 - VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 - VMOVDQA ctr3StoreAVX2, DD0 - VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 - VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + VMOVDQA ·chacha20Constants<>+0(SB), Y0 + VMOVDQA Y0, Y5 + VMOVDQA Y0, Y6 + VMOVDQA Y0, Y7 + VMOVDQA 32(BP), Y14 + VMOVDQA Y14, Y9 + VMOVDQA Y14, Y10 + VMOVDQA Y14, Y11 + VMOVDQA 64(BP), Y12 + VMOVDQA Y12, Y13 + VMOVDQA Y12, Y8 + VMOVDQA Y12, Y15 + VMOVDQA 192(BP), Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y4 + VPADDD ·avx2IncMask<>+0(SB), Y4, Y1 + VPADDD ·avx2IncMask<>+0(SB), Y1, Y2 + VPADDD ·avx2IncMask<>+0(SB), Y2, Y3 + VMOVDQA Y4, 96(BP) + VMOVDQA Y1, 128(BP) + VMOVDQA Y2, 160(BP) + VMOVDQA Y3, 192(BP) sealAVX2Tail512LoopA: - polyAdd(0(oup)) - polyMul - LEAQ 16(oup), oup + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), AX + MOVQ AX, R15 + MULQ R10 + MOVQ AX, R13 + MOVQ DX, R14 + MOVQ (BP), AX + MULQ R11 + IMULQ R12, R15 + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), AX + MOVQ AX, R8 + MULQ R10 + ADDQ AX, R14 + ADCQ $0x00, DX + MOVQ DX, R10 + MOVQ 8(BP), AX + MULQ R11 + ADDQ AX, R15 + ADCQ $0x00, DX + IMULQ R12, R8 + ADDQ R10, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 16(DI), DI sealAVX2Tail512LoopB: - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR 
CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - polyAdd(0*8(oup)) - polyMulAVX2 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - polyAdd(2*8(oup)) - polyMulAVX2 - LEAQ (4*8)(oup), oup - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 - VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 - VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 - VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 - VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 - VMOVDQA CC3, tmpStoreAVX2 - VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 - VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 - VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 - VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 - VMOVDQA tmpStoreAVX2, CC3 - VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 - VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 - VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 - - DECQ itr1 - JG 
sealAVX2Tail512LoopA - DECQ itr2 - JGE sealAVX2Tail512LoopB - - VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 - VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 - VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 - VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 - VMOVDQA CC3, tmpStoreAVX2 - VPERM2I128 $0x02, AA0, BB0, CC3 - VPXOR (0*32)(inp), CC3, CC3 - VMOVDQU CC3, (0*32)(oup) - VPERM2I128 $0x02, CC0, DD0, CC3 - VPXOR (1*32)(inp), CC3, CC3 - VMOVDQU CC3, (1*32)(oup) - VPERM2I128 $0x13, AA0, BB0, CC3 - VPXOR (2*32)(inp), CC3, CC3 - VMOVDQU CC3, (2*32)(oup) - VPERM2I128 $0x13, CC0, DD0, CC3 - VPXOR (3*32)(inp), CC3, CC3 - VMOVDQU CC3, (3*32)(oup) - - VPERM2I128 $0x02, AA1, BB1, AA0 - VPERM2I128 $0x02, CC1, DD1, BB0 - VPERM2I128 $0x13, AA1, BB1, CC0 - VPERM2I128 $0x13, CC1, DD1, DD0 - VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 - VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) - - VPERM2I128 $0x02, AA2, BB2, AA0 - VPERM2I128 $0x02, CC2, DD2, BB0 - VPERM2I128 $0x13, AA2, BB2, CC0 - VPERM2I128 $0x13, CC2, DD2, DD0 - VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 - VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) - - MOVQ $384, itr1 - LEAQ 384(inp), inp - SUBQ $384, inl - VPERM2I128 $0x02, AA3, BB3, AA0 - VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 - VPERM2I128 $0x13, AA3, BB3, CC0 - VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 - - JMP sealAVX2SealHash + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + ADDQ (DI), R10 + ADCQ 8(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + 
VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x04, Y14, Y14, Y14 + VPALIGNR $0x04, Y9, Y9, Y9 + VPALIGNR $0x04, Y10, Y10, Y10 + VPALIGNR $0x04, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x0c, Y4, Y4, Y4 + VPALIGNR $0x0c, Y1, Y1, Y1 + VPALIGNR $0x0c, Y2, Y2, Y2 + VPALIGNR $0x0c, Y3, Y3, Y3 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol16<>+0(SB), Y4, Y4 + VPSHUFB ·rol16<>+0(SB), Y1, Y1 + VPSHUFB ·rol16<>+0(SB), Y2, Y2 + VPSHUFB ·rol16<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + ADDQ 16(DI), R10 + ADCQ 24(DI), R11 + ADCQ $0x01, R12 + MOVQ (BP), DX + MOVQ DX, R15 + MULXQ R10, R13, R14 + IMULQ R12, R15 + MULXQ R11, AX, DX + ADDQ AX, R14 + ADCQ DX, R15 + MOVQ 8(BP), DX + MULXQ R10, R10, AX + ADDQ R10, R14 + MULXQ R11, R11, R8 + ADCQ R11, R15 + ADCQ $0x00, R8 + IMULQ R12, DX + ADDQ AX, R15 + ADCQ DX, R8 + MOVQ R13, R10 + MOVQ R14, R11 + MOVQ R15, R12 + ANDQ $0x03, R12 + MOVQ R15, R13 + ANDQ $-4, R13 + MOVQ R8, R14 + SHRQ $0x02, R8, R15 + SHRQ $0x02, R8 + ADDQ R13, R10 + ADCQ R14, R11 + ADCQ $0x00, R12 + ADDQ R15, R10 + ADCQ R8, R11 + ADCQ $0x00, R12 + LEAQ 32(DI), DI + VMOVDQA Y15, 224(BP) + VPSLLD $0x0c, Y14, Y15 + VPSRLD $0x14, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x0c, Y9, Y15 + VPSRLD $0x14, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x0c, Y10, Y15 + VPSRLD $0x14, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x0c, Y11, Y15 + VPSRLD $0x14, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPADDD Y14, Y0, Y0 + VPADDD Y9, Y5, Y5 + VPADDD Y10, Y6, Y6 + VPADDD Y11, Y7, Y7 + VPXOR Y0, Y4, Y4 + VPXOR Y5, Y1, Y1 + VPXOR Y6, Y2, Y2 + VPXOR Y7, Y3, Y3 + VPSHUFB ·rol8<>+0(SB), Y4, Y4 + VPSHUFB ·rol8<>+0(SB), Y1, Y1 + VPSHUFB ·rol8<>+0(SB), Y2, Y2 + VPSHUFB ·rol8<>+0(SB), Y3, Y3 + VPADDD Y4, Y12, Y12 + VPADDD Y1, Y13, Y13 + VPADDD Y2, Y8, Y8 + VPADDD Y3, Y15, Y15 + VPXOR Y12, Y14, Y14 + VPXOR Y13, Y9, Y9 + VPXOR Y8, Y10, Y10 + VPXOR Y15, Y11, Y11 + VMOVDQA Y15, 224(BP) + VPSLLD $0x07, Y14, Y15 + VPSRLD $0x19, Y14, Y14 + VPXOR Y15, Y14, Y14 + VPSLLD $0x07, Y9, Y15 + VPSRLD $0x19, Y9, Y9 + VPXOR Y15, Y9, Y9 + VPSLLD $0x07, Y10, Y15 + VPSRLD $0x19, Y10, Y10 + VPXOR Y15, Y10, Y10 + VPSLLD $0x07, Y11, Y15 + VPSRLD $0x19, Y11, Y11 + VPXOR Y15, Y11, Y11 + VMOVDQA 224(BP), Y15 + VPALIGNR $0x0c, Y14, Y14, Y14 + VPALIGNR $0x0c, Y9, Y9, Y9 + VPALIGNR $0x0c, Y10, Y10, Y10 + VPALIGNR $0x0c, Y11, Y11, Y11 + VPALIGNR $0x08, Y12, Y12, Y12 + VPALIGNR $0x08, Y13, Y13, Y13 + VPALIGNR $0x08, Y8, Y8, Y8 + VPALIGNR $0x08, Y15, Y15, Y15 + VPALIGNR $0x04, Y4, Y4, Y4 + VPALIGNR $0x04, Y1, Y1, Y1 + VPALIGNR $0x04, Y2, Y2, Y2 + VPALIGNR $0x04, Y3, Y3, Y3 + DECQ CX + JG sealAVX2Tail512LoopA + DECQ R9 + JGE 
sealAVX2Tail512LoopB + VPADDD ·chacha20Constants<>+0(SB), Y0, Y0 + VPADDD ·chacha20Constants<>+0(SB), Y5, Y5 + VPADDD ·chacha20Constants<>+0(SB), Y6, Y6 + VPADDD ·chacha20Constants<>+0(SB), Y7, Y7 + VPADDD 32(BP), Y14, Y14 + VPADDD 32(BP), Y9, Y9 + VPADDD 32(BP), Y10, Y10 + VPADDD 32(BP), Y11, Y11 + VPADDD 64(BP), Y12, Y12 + VPADDD 64(BP), Y13, Y13 + VPADDD 64(BP), Y8, Y8 + VPADDD 64(BP), Y15, Y15 + VPADDD 96(BP), Y4, Y4 + VPADDD 128(BP), Y1, Y1 + VPADDD 160(BP), Y2, Y2 + VPADDD 192(BP), Y3, Y3 + VMOVDQA Y15, 224(BP) + VPERM2I128 $0x02, Y0, Y14, Y15 + VPXOR (SI), Y15, Y15 + VMOVDQU Y15, (DI) + VPERM2I128 $0x02, Y12, Y4, Y15 + VPXOR 32(SI), Y15, Y15 + VMOVDQU Y15, 32(DI) + VPERM2I128 $0x13, Y0, Y14, Y15 + VPXOR 64(SI), Y15, Y15 + VMOVDQU Y15, 64(DI) + VPERM2I128 $0x13, Y12, Y4, Y15 + VPXOR 96(SI), Y15, Y15 + VMOVDQU Y15, 96(DI) + VPERM2I128 $0x02, Y5, Y9, Y0 + VPERM2I128 $0x02, Y13, Y1, Y14 + VPERM2I128 $0x13, Y5, Y9, Y12 + VPERM2I128 $0x13, Y13, Y1, Y4 + VPXOR 128(SI), Y0, Y0 + VPXOR 160(SI), Y14, Y14 + VPXOR 192(SI), Y12, Y12 + VPXOR 224(SI), Y4, Y4 + VMOVDQU Y0, 128(DI) + VMOVDQU Y14, 160(DI) + VMOVDQU Y12, 192(DI) + VMOVDQU Y4, 224(DI) + VPERM2I128 $0x02, Y6, Y10, Y0 + VPERM2I128 $0x02, Y8, Y2, Y14 + VPERM2I128 $0x13, Y6, Y10, Y12 + VPERM2I128 $0x13, Y8, Y2, Y4 + VPXOR 256(SI), Y0, Y0 + VPXOR 288(SI), Y14, Y14 + VPXOR 320(SI), Y12, Y12 + VPXOR 352(SI), Y4, Y4 + VMOVDQU Y0, 256(DI) + VMOVDQU Y14, 288(DI) + VMOVDQU Y12, 320(DI) + VMOVDQU Y4, 352(DI) + MOVQ $0x00000180, CX + LEAQ 384(SI), SI + SUBQ $0x00000180, BX + VPERM2I128 $0x02, Y7, Y11, Y0 + VPERM2I128 $0x02, 224(BP), Y3, Y14 + VPERM2I128 $0x13, Y7, Y11, Y12 + VPERM2I128 $0x13, 224(BP), Y3, Y4 + JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s index e0d3c64756692..133757384b787 100644 --- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s +++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s @@ -1,108 +1,93 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run sum_amd64_asm.go -out ../sum_amd64.s -pkg poly1305. DO NOT EDIT. 
//go:build gc && !purego -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) +// func update(state *macState, msg []byte) TEXT ·update(SB), $0-32 MOVQ state+0(FP), DI MOVQ msg_base+8(FP), SI MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 + MOVQ (DI), R8 + MOVQ 8(DI), R9 + MOVQ 16(DI), R10 + MOVQ 24(DI), R11 + MOVQ 32(DI), R12 + CMPQ R15, $0x10 JB bytes_between_0_and_15 loop: - POLY1305_ADD(SI, R8, R9, R10) + ADDQ (SI), R8 + ADCQ 8(SI), R9 + ADCQ $0x01, R10 + LEAQ 16(SI), SI multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop + MOVQ R11, AX + MULQ R8 + MOVQ AX, BX + MOVQ DX, CX + MOVQ R11, AX + MULQ R9 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ R11, R13 + IMULQ R10, R13 + ADDQ DX, R13 + MOVQ R12, AX + MULQ R8 + ADDQ AX, CX + ADCQ $0x00, DX + MOVQ DX, R8 + MOVQ R12, R14 + IMULQ R10, R14 + MOVQ R12, AX + MULQ R9 + ADDQ AX, R13 + ADCQ DX, R14 + ADDQ R8, R13 + ADCQ $0x00, R14 + MOVQ BX, R8 + MOVQ CX, R9 + MOVQ R13, R10 + ANDQ $0x03, R10 + MOVQ R13, BX + ANDQ $-4, BX + ADDQ BX, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SHRQ $0x02, R14, R13 + SHRQ $0x02, R14 + ADDQ R13, R8 + ADCQ R14, R9 + ADCQ $0x00, R10 + SUBQ $0x10, R15 + CMPQ R15, $0x10 + JAE loop bytes_between_0_and_15: TESTQ R15, R15 JZ done - MOVQ $1, BX + MOVQ $0x00000001, BX XORQ CX, CX XORQ R13, R13 ADDQ R15, SI flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX + SHLQ $0x08, BX, CX + SHLQ $0x08, BX MOVB -1(SI), R13 XORQ R13, BX DECQ SI DECQ R15 JNZ flush_buffer - ADDQ BX, R8 ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 + ADCQ $0x00, R10 + MOVQ $0x00000010, R15 JMP multiply done: - MOVQ R8, 0(DI) + MOVQ R8, (DI) MOVQ R9, 8(DI) MOVQ R10, 16(DI) RET diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb332174885..109997d77cea5 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. 
raw interface{} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index ec07aab05788c..02609d5b21d56 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -201,6 +201,25 @@ var S390X struct { _ CacheLinePad } +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + func init() { archInit() initOptions() diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e73355734..7d902b6847bbf 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 0000000000000..cb4a0c5728073 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. 
+// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. + +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. 
+ if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. +func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c004b44..aca3199c91169 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09eb503c..e14b766a32c5f 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200adb497..099867deedec6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. 
+func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f8e38a..a6a2d2fc2b90c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac1772b9f..d73c4652e6c54 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a16900..4a55a40058894 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab3d939..1ec2b1407b126 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 
b622533ef2c2b..24b346e1a3515 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646baf230..ebd213100b352 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f69f19d..824b9c2d5e0e0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), 
uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75df04f..4f178a229345e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f3a5c9..d003c3d43780c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef74d0d3..0d45a941aaecc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a3aa19..51e13eb055fc5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee77fa0..d002d8ef3cc2d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee8b133..3f863d898dd85 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12acfd9d2..61c7293106616 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ 
b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c7900660..b5d17414f039f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7f1961b907a2b..9f2550dc3120d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2486,7 +2486,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc04142f2b..ad05b51a60364 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd17c5cd..5cee9a3143fd5 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = 
kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 3f03b3d57cc35..7b97a154c9573 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3e4726..4c2e1bdc01ed3 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + 
err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index 465f560604e41..df6bf948e1407 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -26,6 +26,7 @@ func makeRaw(fd int) (*State, error) { return nil, err } raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err } diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 7699dd712d635..fcf10a73c2780 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1915,7 +1915,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable backend services in the specified project.", + "description": "Retrieves a list of all usable backend services in the specified project.", "flatPath": "projects/{project}/global/backendServices/listUsable", "httpMethod": "GET", "id": "compute.backendServices.listUsable", @@ -20638,7 +20638,7 @@ ] }, "listUsable": { - "description": "Retrieves an aggregated list of all usable backend services in the specified project in the given region.", + "description": "Retrieves a list of all usable backend services in the specified project in the given region.", "flatPath": "projects/{project}/regions/{region}/backendServices/listUsable", "httpMethod": "GET", "id": "compute.regionBackendServices.listUsable", @@ -37779,7 +37779,7 @@ } } }, - "revision": "20240813", + "revision": "20240903", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -39033,6 +39033,10 @@ "format": "int32", "type": "integer" }, + "turboMode": { + "description": "Turbo frequency mode to use for the instance. Supported modes include: * ALL_CORE_MAX Using empty string or not setting this field will use the platform-specific default turbo mode.", + "type": "string" + }, "visibleCoreCount": { "description": "The number of physical cores to expose to an instance. 
Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance's nominal CPU count and the underlying platform's SMT width.", "format": "int32", @@ -40484,6 +40488,13 @@ "selfLink": { "description": "[Output Only] Server-defined URL for the resource.", "type": "string" + }, + "usedBy": { + "description": "[Output Only] List of resources referencing that backend bucket.", + "items": { + "$ref": "BackendBucketUsedBy" + }, + "type": "array" } }, "type": "object" @@ -40770,6 +40781,16 @@ }, "type": "object" }, + "BackendBucketUsedBy": { + "id": "BackendBucketUsedBy", + "properties": { + "reference": { + "description": "[Output Only] Server-defined URL for UrlMaps referencing that BackendBucket.", + "type": "string" + } + }, + "type": "object" + }, "BackendService": { "description": "Represents a Backend Service resource. A backend service defines how Google Cloud load balancers distribute traffic. The backend service configuration contains a set of values, such as the protocol used to connect to backends, various distribution and session settings, health checks, and timeouts. These settings provide fine-grained control over how your load balancer behaves. Most of the settings have default values that allow for easy configuration if you need to get started quickly. Backend services in Google Compute Engine can be either regionally or globally scoped. * [Global](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) * [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices) For more information, see Backend Services.", "id": "BackendService", @@ -47965,6 +47986,7 @@ "SEV_LIVE_MIGRATABLE", "SEV_LIVE_MIGRATABLE_V2", "SEV_SNP_CAPABLE", + "TDX_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" @@ -47981,6 +48003,7 @@ "", "", "", + "", "" ], "type": "string" @@ -60154,6 +60177,11 @@ "description": "Metadata defined as annotations on the network endpoint.", "type": "object" }, + "clientDestinationPort": { + "description": "Represents the port number to which PSC consumer sends packets. Only valid for network endpoint groups created with GCE_VM_IP_PORTMAP endpoint type.", + "format": "int32", + "type": "integer" + }, "fqdn": { "description": "Optional fully qualified domain name of network endpoint. This can only be specified when NetworkEndpointGroup.network_endpoint_type is NON_GCP_FQDN_PORT.", "type": "string" @@ -60233,6 +60261,7 @@ "enum": [ "GCE_VM_IP", "GCE_VM_IP_PORT", + "GCE_VM_IP_PORTMAP", "INTERNET_FQDN_PORT", "INTERNET_IP_PORT", "NON_GCP_PRIVATE_IP_PORT", @@ -60242,6 +60271,7 @@ "enumDescriptions": [ "The network endpoint is represented by an IP address.", "The network endpoint is represented by IP address and port pair.", + "The network endpoint is represented by an IP, Port and Client Destination Port.", "The network endpoint is represented by fully qualified domain name and port.", "The network endpoint is represented by an internet IP address and port.", "The network endpoint is represented by an IP address and port. The endpoint belongs to a VM or pod running in a customer's on-premises.", @@ -60652,6 +60682,11 @@ "description": "[Output Only] Address allocated from given subnetwork for PSC. 
This IP address acts as a VIP for a PSC NEG, allowing it to act as an endpoint in L7 PSC-XLB.", "type": "string" }, + "producerPort": { + "description": "The psc producer port is used to connect PSC NEG with specific port on the PSC Producer side; should only be used for the PRIVATE_SERVICE_CONNECT NEG type", + "format": "int32", + "type": "integer" + }, "pscConnectionId": { "description": "[Output Only] The PSC connection id of the PSC Network Endpoint Group Consumer.", "format": "uint64", @@ -61453,6 +61488,11 @@ "description": "[Output Only] The name of the firewall policy.", "type": "string" }, + "priority": { + "description": "[Output only] Priority of firewall policy association. Not applicable for type=HIERARCHY.", + "format": "int32", + "type": "integer" + }, "rules": { "description": "The rules that apply to the network.", "items": { @@ -61469,9 +61509,11 @@ "enum": [ "HIERARCHY", "NETWORK", + "SYSTEM", "UNSPECIFIED" ], "enumDescriptions": [ + "", "", "", "" @@ -78057,7 +78099,7 @@ "type": "string" }, "internalIpv6Prefix": { - "description": "[Output Only] The internal IPv6 address range that is assigned to this subnetwork.", + "description": "The internal IPv6 address range that is owned by this subnetwork.", "type": "string" }, "ipCidrRange": { diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 9ea1149d2bd6f..1a0dde8f2d2ca 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -2683,6 +2683,10 @@ type AdvancedMachineFeatures struct { // simultaneous multithreading (SMT) set this to 1. If unset, the maximum // number of threads supported per core by the underlying processor is assumed. ThreadsPerCore int64 `json:"threadsPerCore,omitempty"` + // TurboMode: Turbo frequency mode to use for the instance. Supported modes + // include: * ALL_CORE_MAX Using empty string or not setting this field will + // use the platform-specific default turbo mode. + TurboMode string `json:"turboMode,omitempty"` // VisibleCoreCount: The number of physical cores to expose to an instance. // Multiply by the number of threads per core to compute the total number of // virtual CPUs to expose to the instance. If unset, the number of cores is @@ -4488,6 +4492,8 @@ type BackendBucket struct { Name string `json:"name,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // UsedBy: [Output Only] List of resources referencing that backend bucket. + UsedBy []*BackendBucketUsedBy `json:"usedBy,omitempty"` // ServerResponse contains the HTTP response code and headers from the server. googleapi.ServerResponse `json:"-"` @@ -4877,6 +4883,28 @@ func (s BackendBucketListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) } +type BackendBucketUsedBy struct { + // Reference: [Output Only] Server-defined URL for UrlMaps referencing that + // BackendBucket. + Reference string `json:"reference,omitempty"` + // ForceSendFields is a list of field names (e.g. "Reference") to + // unconditionally include in API requests. By default, fields with empty or + // default values are omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-ForceSendFields for more + // details. + ForceSendFields []string `json:"-"` + // NullFields is a list of field names (e.g. 
"Reference") to include in API + // requests with the JSON null value. By default, fields with empty values are + // omitted from API requests. See + // https://pkg.go.dev/google.golang.org/api#hdr-NullFields for more details. + NullFields []string `json:"-"` +} + +func (s BackendBucketUsedBy) MarshalJSON() ([]byte, error) { + type NoMethod BackendBucketUsedBy + return gensupport.MarshalJSON(NoMethod(s), s.ForceSendFields, s.NullFields) +} + // BackendService: Represents a Backend Service resource. A backend service // defines how Google Cloud load balancers distribute traffic. The backend // service configuration contains a set of values, such as the protocol used to @@ -13281,6 +13309,7 @@ type GuestOsFeature struct { // "SEV_LIVE_MIGRATABLE" // "SEV_LIVE_MIGRATABLE_V2" // "SEV_SNP_CAPABLE" + // "TDX_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" @@ -27092,6 +27121,10 @@ func (s NetworkEdgeSecurityServicesScopedListWarningData) MarshalJSON() ([]byte, type NetworkEndpoint struct { // Annotations: Metadata defined as annotations on the network endpoint. Annotations map[string]string `json:"annotations,omitempty"` + // ClientDestinationPort: Represents the port number to which PSC consumer + // sends packets. Only valid for network endpoint groups created with + // GCE_VM_IP_PORTMAP endpoint type. + ClientDestinationPort int64 `json:"clientDestinationPort,omitempty"` // Fqdn: Optional fully qualified domain name of network endpoint. This can // only be specified when NetworkEndpointGroup.network_endpoint_type is // NON_GCP_FQDN_PORT. @@ -27188,6 +27221,8 @@ type NetworkEndpointGroup struct { // "GCE_VM_IP" - The network endpoint is represented by an IP address. // "GCE_VM_IP_PORT" - The network endpoint is represented by IP address and // port pair. + // "GCE_VM_IP_PORTMAP" - The network endpoint is represented by an IP, Port + // and Client Destination Port. // "INTERNET_FQDN_PORT" - The network endpoint is represented by fully // qualified domain name and port. // "INTERNET_IP_PORT" - The network endpoint is represented by an internet IP @@ -27675,6 +27710,10 @@ type NetworkEndpointGroupPscData struct { // for PSC. This IP address acts as a VIP for a PSC NEG, allowing it to act as // an endpoint in L7 PSC-XLB. ConsumerPscAddress string `json:"consumerPscAddress,omitempty"` + // ProducerPort: The psc producer port is used to connect PSC NEG with specific + // port on the PSC Producer side; should only be used for the + // PRIVATE_SERVICE_CONNECT NEG type + ProducerPort int64 `json:"producerPort,omitempty"` // PscConnectionId: [Output Only] The PSC connection id of the PSC Network // Endpoint Group Consumer. PscConnectionId uint64 `json:"pscConnectionId,omitempty,string"` @@ -28577,6 +28616,9 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { DisplayName string `json:"displayName,omitempty"` // Name: [Output Only] The name of the firewall policy. Name string `json:"name,omitempty"` + // Priority: [Output only] Priority of firewall policy association. Not + // applicable for type=HIERARCHY. + Priority int64 `json:"priority,omitempty"` // Rules: The rules that apply to the network. Rules []*FirewallPolicyRule `json:"rules,omitempty"` // ShortName: [Output Only] The short name of the firewall policy. 
@@ -28586,6 +28628,7 @@ type NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy struct { // Possible values: // "HIERARCHY" // "NETWORK" + // "SYSTEM" // "UNSPECIFIED" Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to @@ -46373,8 +46416,8 @@ type Subnetwork struct { // Id: [Output Only] The unique identifier for the resource. This identifier is // defined by the server. Id uint64 `json:"id,omitempty,string"` - // InternalIpv6Prefix: [Output Only] The internal IPv6 address range that is - // assigned to this subnetwork. + // InternalIpv6Prefix: The internal IPv6 address range that is owned by this + // subnetwork. InternalIpv6Prefix string `json:"internalIpv6Prefix,omitempty"` // IpCidrRange: The range of internal addresses that are owned by this // subnetwork. Provide this property when you create the subnetwork. For diff --git a/vendor/google.golang.org/api/compute/v1/compute2-gen.go b/vendor/google.golang.org/api/compute/v1/compute2-gen.go index 8b9c7f13e7445..74f8dee4bf6d1 100644 --- a/vendor/google.golang.org/api/compute/v1/compute2-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute2-gen.go @@ -5413,8 +5413,8 @@ type BackendServicesListUsableCall struct { header_ http.Header } -// ListUsable: Retrieves an aggregated list of all usable backend services in -// the specified project. +// ListUsable: Retrieves a list of all usable backend services in the specified +// project. // // - project: Project ID for this request. func (r *BackendServicesService) ListUsable(project string) *BackendServicesListUsableCall { diff --git a/vendor/google.golang.org/api/compute/v1/compute3-gen.go b/vendor/google.golang.org/api/compute/v1/compute3-gen.go index 71248bab0ccb5..dc8c4d4ffcf4e 100644 --- a/vendor/google.golang.org/api/compute/v1/compute3-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute3-gen.go @@ -1650,8 +1650,8 @@ type RegionBackendServicesListUsableCall struct { header_ http.Header } -// ListUsable: Retrieves an aggregated list of all usable backend services in -// the specified project in the given region. +// ListUsable: Retrieves a list of all usable backend services in the specified +// project in the given region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. It must be a string that diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index e4e4e6bf7aec5..ed0987b42e905 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "0.193.0" +const Version = "0.198.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 5ee9276a0b870..597daf0a72464 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -43,7 +43,7 @@ "location": "me-central2" } ], - "etag": "\"34373939373134303235393739323331393435\"", + "etag": "\"38363036373236373330353534313035333932\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -4136,7 +4136,7 @@ } } }, - "revision": "20240809", + "revision": "20240819", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { @@ -4349,6 +4349,11 @@ "format": "int64", "type": "string" }, + "hardDeleteTime": { + "description": "The hard delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "hierarchicalNamespace": { "description": "The bucket's hierarchical namespace configuration.", "properties": { @@ -4675,6 +4680,11 @@ }, "type": "object" }, + "softDeleteTime": { + "description": "The soft delete time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "storageClass": { "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see [Storage Classes](https://cloud.google.com/storage/docs/storage-classes).", "type": "string" diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 8a16d773485f3..b16e3f227b213 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -463,6 +463,8 @@ type Bucket struct { Etag string `json:"etag,omitempty"` // Generation: The generation of this bucket. Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: The hard delete time of the bucket in RFC 3339 format. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` // HierarchicalNamespace: The bucket's hierarchical namespace configuration. HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"` // IamConfiguration: The bucket's IAM configuration. @@ -525,6 +527,8 @@ type Bucket struct { // of time that soft-deleted objects will be retained, and cannot be // permanently deleted. SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` + // SoftDeleteTime: The soft delete time of the bucket in RFC 3339 format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` // StorageClass: The bucket's default storage class, used whenever no // storageClass is specified for a newly-created object. 
This defines how // objects in the bucket are stored and determines the SLA and the cost of diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index d2a4f76645aa0..ff3539d898f05 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -247,6 +247,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) return pool, err } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 2e2b15c6e0c28..d5b213e0f08df 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -130,6 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) if err != nil { return nil, err diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index fe19e8f97a711..aa69fb4d509ff 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -719,6 +719,8 @@ type PythonSettings struct { // Some settings. Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` + // Experimental features to be included during client library generation. + ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"` } func (x *PythonSettings) Reset() { @@ -760,6 +762,13 @@ func (x *PythonSettings) GetCommon() *CommonLanguageSettings { return nil } +func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures { + if x != nil { + return x.ExperimentalFeatures + } + return nil +} + // Settings for Node client libraries. type NodeSettings struct { state protoimpl.MessageState @@ -1114,6 +1123,60 @@ func (x *MethodSettings) GetAutoPopulatedFields() []string { return nil } +// Experimental features to be included during client library generation. +// These fields will be deprecated once the feature graduates and is enabled +// by default. +type PythonSettings_ExperimentalFeatures struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Enables generation of asynchronous REST clients if `rest` transport is + // enabled. By default, asynchronous REST clients will not be generated. + // This feature will be enabled by default 1 month after launching the + // feature in preview packages. 
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"` +} + +func (x *PythonSettings_ExperimentalFeatures) Reset() { + *x = PythonSettings_ExperimentalFeatures{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings_ExperimentalFeatures) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {} + +func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead. +func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool { + if x != nil { + return x.RestAsyncIoEnabled + } + return false +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1142,7 +1205,7 @@ type MethodSettings_LongRunning struct { func (x *MethodSettings_LongRunning) Reset() { *x = MethodSettings_LongRunning{} if protoimpl.UnsafeEnabled { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1155,7 +1218,7 @@ func (x *MethodSettings_LongRunning) String() string { func (*MethodSettings_LongRunning) ProtoMessage() {} func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { - mi := &file_google_api_client_proto_msgTypes[15] + mi := &file_google_api_client_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1460,132 +1523,143 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, - 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, - 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, - 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, - 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, - 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, - 
0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, - 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xfd, 0x01, + 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x64, 0x0a, 0x15, + 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x14, 0x65, 0x78, + 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x1a, 0x49, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x15, 0x72, 0x65, + 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x72, 0x65, 0x73, 0x74, 0x41, + 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, + 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, + 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, + 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, + 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, + 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, + 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, - 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, - 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, - 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, - 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, - 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, - 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, - 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, - 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, - 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, - 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x61, 0x70, 0x69, - 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, - 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, - 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, - 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, - 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, - 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, - 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, + 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, + 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, + 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, + 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, + 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, + 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, + 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, + 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, - 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, - 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, - 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, - 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, - 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, - 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x53, - 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, - 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, - 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, - 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, - 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, - 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, - 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 
0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, - 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, - 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, - 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x69, - 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, - 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, + 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, + 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x41, + 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, 0x03, + 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, 0x45, + 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 
0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, + 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, + 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, + 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, + 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b, 0x61, 0x70, + 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab, 0xfa, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1601,34 +1675,35 @@ func file_google_api_client_proto_rawDescGZIP() []byte { } var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_google_api_client_proto_goTypes = []interface{}{ - (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization - (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination - 
(*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings - (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings - (*Publishing)(nil), // 4: google.api.Publishing - (*JavaSettings)(nil), // 5: google.api.JavaSettings - (*CppSettings)(nil), // 6: google.api.CppSettings - (*PhpSettings)(nil), // 7: google.api.PhpSettings - (*PythonSettings)(nil), // 8: google.api.PythonSettings - (*NodeSettings)(nil), // 9: google.api.NodeSettings - (*DotnetSettings)(nil), // 10: google.api.DotnetSettings - (*RubySettings)(nil), // 11: google.api.RubySettings - (*GoSettings)(nil), // 12: google.api.GoSettings - (*MethodSettings)(nil), // 13: google.api.MethodSettings - nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry - nil, // 15: google.api.DotnetSettings.RenamedServicesEntry - nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry - (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning - (api.LaunchStage)(0), // 18: google.api.LaunchStage - (*durationpb.Duration)(nil), // 19: google.protobuf.Duration - (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*PythonSettings_ExperimentalFeatures)(nil), // 15: google.api.PythonSettings.ExperimentalFeatures + nil, // 16: google.api.DotnetSettings.RenamedServicesEntry + nil, // 17: google.api.DotnetSettings.RenamedResourcesEntry + (*MethodSettings_LongRunning)(nil), // 18: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 19: google.api.LaunchStage + (*durationpb.Duration)(nil), // 20: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 21: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination - 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 19, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings @@ -1645,25 +1720,26 @@ var file_google_api_client_proto_depIdxs = []int32{ 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings 2, // 16: google.api.PhpSettings.common:type_name -> 
google.api.CommonLanguageSettings 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings - 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry - 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry - 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings - 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings - 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning - 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration - 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration - 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration - 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 21, // 31: google.api.api_version:extendee -> google.protobuf.ServiceOptions - 32, // [32:32] is the sub-list for method output_type - 32, // [32:32] is the sub-list for method input_type - 32, // [32:32] is the sub-list for extension type_name - 28, // [28:32] is the sub-list for extension extendee - 0, // [0:28] is the sub-list for field type_name + 15, // 18: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures + 2, // 19: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 16, // 21: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry + 17, // 22: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry + 2, // 23: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 24: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 18, // 25: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 20, // 26: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 20, // 27: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 20, // 28: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 21, // 29: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 22, // 30: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 22, // 31: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 22, // 32: google.api.api_version:extendee -> google.protobuf.ServiceOptions + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 29, // [29:33] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -1816,7 +1892,19 @@ func 
file_google_api_client_proto_init() { return nil } } - file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings_ExperimentalFeatures); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*MethodSettings_LongRunning); i { case 0: return &v.state @@ -1835,7 +1923,7 @@ func file_google_api_client_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, NumEnums: 2, - NumMessages: 16, + NumMessages: 17, NumExtensions: 4, NumServices: 0, }, diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md index 6a8a07781ae34..5d4096d46a048 100644 --- a/vendor/google.golang.org/grpc/MAINTAINERS.md +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -9,21 +9,28 @@ for general contribution guidelines. ## Maintainers (in alphabetical order) +- [aranjans](https://github.com/aranjans), Google LLC +- [arjan-bal](https://github.com/arjan-bal), Google LLC +- [arvindbr8](https://github.com/arvindbr8), Google LLC - [atollena](https://github.com/atollena), Datadog, Inc. -- [cesarghali](https://github.com/cesarghali), Google LLC - [dfawley](https://github.com/dfawley), Google LLC - [easwars](https://github.com/easwars), Google LLC -- [menghanl](https://github.com/menghanl), Google LLC -- [srini100](https://github.com/srini100), Google LLC +- [erm-g](https://github.com/erm-g), Google LLC +- [gtcooke94](https://github.com/gtcooke94), Google LLC +- [purnesh42h](https://github.com/purnesh42h), Google LLC +- [zasweq](https://github.com/zasweq), Google LLC ## Emeritus Maintainers (in alphabetical order) -- [adelez](https://github.com/adelez), Google LLC -- [canguler](https://github.com/canguler), Google LLC -- [iamqizhao](https://github.com/iamqizhao), Google LLC -- [jadekler](https://github.com/jadekler), Google LLC -- [jtattermusch](https://github.com/jtattermusch), Google LLC -- [lyuxuan](https://github.com/lyuxuan), Google LLC -- [makmukhi](https://github.com/makmukhi), Google LLC -- [matt-kwong](https://github.com/matt-kwong), Google LLC -- [nicolasnoble](https://github.com/nicolasnoble), Google LLC -- [yongni](https://github.com/yongni), Google LLC +- [adelez](https://github.com/adelez) +- [canguler](https://github.com/canguler) +- [cesarghali](https://github.com/cesarghali) +- [iamqizhao](https://github.com/iamqizhao) +- [jeanbza](https://github.com/jeanbza) +- [jtattermusch](https://github.com/jtattermusch) +- [lyuxuan](https://github.com/lyuxuan) +- [makmukhi](https://github.com/makmukhi) +- [matt-kwong](https://github.com/matt-kwong) +- [menghanl](https://github.com/menghanl) +- [nicolasnoble](https://github.com/nicolasnoble) +- [srini100](https://github.com/srini100) +- [yongni](https://github.com/yongni) diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md index be6e108705c48..abab279379ba8 100644 --- a/vendor/google.golang.org/grpc/SECURITY.md +++ b/vendor/google.golang.org/grpc/SECURITY.md @@ -1,3 +1,3 @@ # Security Policy -For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). 
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md). diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go index 0787d0b50ce94..d7b40b7cb66f1 100644 --- a/vendor/google.golang.org/grpc/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/backoff/backoff.go @@ -39,7 +39,7 @@ type Config struct { MaxDelay time.Duration } -// DefaultConfig is a backoff configuration with the default values specfied +// DefaultConfig is a backoff configuration with the default values specified // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // // This should be useful for callers who want to configure backoff with diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f391744f7299b..b181f386a1ba6 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" @@ -72,8 +73,21 @@ func unregisterForTesting(name string) { delete(m, name) } +// connectedAddress returns the connected address for a SubConnState. The +// address is only valid if the state is READY. +func connectedAddress(scs SubConnState) resolver.Address { + return scs.connectedAddress +} + +// setConnectedAddress sets the connected address for a SubConnState. +func setConnectedAddress(scs *SubConnState, addr resolver.Address) { + scs.connectedAddress = addr +} + func init() { internal.BalancerUnregister = unregisterForTesting + internal.ConnectedAddress = connectedAddress + internal.SetConnectedAddress = setConnectedAddress } // Get returns the resolver builder registered with the given name. @@ -243,6 +257,10 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target + // MetricsRecorder is the metrics recorder that balancers can use to record + // metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this field. + MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. @@ -410,6 +428,9 @@ type SubConnState struct { // ConnectionError is set if the ConnectivityState is TransientFailure, // describing the reason the SubConn failed. Otherwise, it is nil. ConnectionError error + // connectedAddr contains the connected address when ConnectivityState is + // Ready. Otherwise, it is indeterminate. 
+ connectedAddress resolver.Address } // ClientConnState describes the state of a ClientConn relevant to the diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index a7f1eeec8e6ae..2b87bd79c7573 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -36,7 +36,7 @@ type baseBuilder struct { config Config } -func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { bal := &baseBalancer{ cc: cc, pickerBuilder: bb.pickerBuilder, @@ -259,6 +259,6 @@ type errPicker struct { err error // Pick() always returns this err. } -func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return balancer.PickResult{}, p.err } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index 0adc98866c081..52f54e6a016ca 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -780,7 +780,7 @@ func file_grpc_lb_v1_load_balancer_proto_rawDescGZIP() []byte { } var file_grpc_lb_v1_load_balancer_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lb_v1_load_balancer_proto_goTypes = []interface{}{ +var file_grpc_lb_v1_load_balancer_proto_goTypes = []any{ (*LoadBalanceRequest)(nil), // 0: grpc.lb.v1.LoadBalanceRequest (*InitialLoadBalanceRequest)(nil), // 1: grpc.lb.v1.InitialLoadBalanceRequest (*ClientStatsPerToken)(nil), // 2: grpc.lb.v1.ClientStatsPerToken @@ -818,7 +818,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceRequest); i { case 0: return &v.state @@ -830,7 +830,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceRequest); i { case 0: return &v.state @@ -842,7 +842,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ClientStatsPerToken); i { case 0: return &v.state @@ -854,7 +854,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ClientStats); i { case 0: return &v.state @@ -866,7 +866,7 @@ func 
file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*LoadBalanceResponse); i { case 0: return &v.state @@ -878,7 +878,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*FallbackResponse); i { case 0: return &v.state @@ -890,7 +890,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*InitialLoadBalanceResponse); i { case 0: return &v.state @@ -902,7 +902,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*ServerList); i { case 0: return &v.state @@ -914,7 +914,7 @@ func file_grpc_lb_v1_load_balancer_proto_init() { return nil } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lb_v1_load_balancer_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*Server); i { case 0: return &v.state @@ -927,11 +927,11 @@ func file_grpc_lb_v1_load_balancer_proto_init() { } } } - file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[0].OneofWrappers = []any{ (*LoadBalanceRequest_InitialRequest)(nil), (*LoadBalanceRequest_ClientStats)(nil), } - file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []interface{}{ + file_grpc_lb_v1_load_balancer_proto_msgTypes[4].OneofWrappers = []any{ (*LoadBalanceResponse_InitialResponse)(nil), (*LoadBalanceResponse_ServerList)(nil), (*LoadBalanceResponse_FallbackResponse)(nil), diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 57a792a7b4887..84e6a25056b9b 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -19,8 +19,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 @@ -34,8 +34,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( LoadBalancer_BalanceLoad_FullMethodName = "/grpc.lb.v1.LoadBalancer/BalanceLoad" @@ -46,7 +46,7 @@ const ( // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type LoadBalancerClient interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) + BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) } type loadBalancerClient struct { @@ -57,53 +57,38 @@ func NewLoadBalancerClient(cc grpc.ClientConnInterface) LoadBalancerClient { return &loadBalancerClient{cc} } -func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) { +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &LoadBalancer_ServiceDesc.Streams[0], LoadBalancer_BalanceLoad_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &loadBalancerBalanceLoadClient{ClientStream: stream} + x := &grpc.GenericClientStream[LoadBalanceRequest, LoadBalanceResponse]{ClientStream: stream} return x, nil } -type LoadBalancer_BalanceLoadClient interface { - Send(*LoadBalanceRequest) error - Recv() (*LoadBalanceResponse, error) - grpc.ClientStream -} - -type loadBalancerBalanceLoadClient struct { - grpc.ClientStream -} - -func (x *loadBalancerBalanceLoadClient) Send(m *LoadBalanceRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadClient) Recv() (*LoadBalanceResponse, error) { - m := new(LoadBalanceResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadClient = grpc.BidiStreamingClient[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancerServer is the server API for LoadBalancer service. // All implementations should embed UnimplementedLoadBalancerServer -// for forward compatibility +// for forward compatibility. type LoadBalancerServer interface { // Bidirectional rpc to get a list of servers. - BalanceLoad(LoadBalancer_BalanceLoadServer) error + BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error } -// UnimplementedLoadBalancerServer should be embedded to have forward compatible implementations. -type UnimplementedLoadBalancerServer struct { -} +// UnimplementedLoadBalancerServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedLoadBalancerServer struct{} -func (UnimplementedLoadBalancerServer) BalanceLoad(LoadBalancer_BalanceLoadServer) error { +func (UnimplementedLoadBalancerServer) BalanceLoad(grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse]) error { return status.Errorf(codes.Unimplemented, "method BalanceLoad not implemented") } +func (UnimplementedLoadBalancerServer) testEmbeddedByValue() {} // UnsafeLoadBalancerServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to LoadBalancerServer will @@ -113,34 +98,22 @@ type UnsafeLoadBalancerServer interface { } func RegisterLoadBalancerServer(s grpc.ServiceRegistrar, srv LoadBalancerServer) { + // If the following call panics, it indicates UnimplementedLoadBalancerServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&LoadBalancer_ServiceDesc, srv) } func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{ServerStream: stream}) -} - -type LoadBalancer_BalanceLoadServer interface { - Send(*LoadBalanceResponse) error - Recv() (*LoadBalanceRequest, error) - grpc.ServerStream -} - -type loadBalancerBalanceLoadServer struct { - grpc.ServerStream + return srv.(LoadBalancerServer).BalanceLoad(&grpc.GenericServerStream[LoadBalanceRequest, LoadBalanceResponse]{ServerStream: stream}) } -func (x *loadBalancerBalanceLoadServer) Send(m *LoadBalanceResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *loadBalancerBalanceLoadServer) Recv() (*LoadBalanceRequest, error) { - m := new(LoadBalanceRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type LoadBalancer_BalanceLoadServer = grpc.BidiStreamingServer[LoadBalanceRequest, LoadBalanceResponse] // LoadBalancer_ServiceDesc is the grpc.ServiceDesc for LoadBalancer service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 47a3e938dcf56..c098762741312 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -219,7 +219,7 @@ type lbBalancer struct { // All backends addresses, with metadata set to nil. This list contains all // backend addresses in the same order and with the same duplicates as in // serverlist. When generating picker, a SubConn slice with the same order - // but with only READY SCs will be gerenated. + // but with only READY SCs will be generated. backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. 
state connectivity.State diff --git a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go index c248a3a83c328..ddd9bd269bf41 100644 --- a/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go +++ b/vendor/google.golang.org/grpc/balancer/leastrequest/leastrequest.go @@ -112,7 +112,9 @@ type scWithRPCCount struct { } func (lrb *leastRequestBalancer) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("least-request: Build called with info: %v", info) + if logger.V(2) { + logger.Infof("least-request: Build called with info: %v", info) + } if len(info.ReadySCs) == 0 { return base.NewErrPicker(balancer.ErrNoSubConnAvailable) } diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go index 07527603f1d4e..4d69b4052f8e3 100644 --- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go +++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go @@ -50,7 +50,7 @@ const ( type pickfirstBuilder struct{} -func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b @@ -155,7 +155,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // Endpoints not set, process addresses until we migrate resolver // emissions fully to Endpoints. The top channel does wrap emitted // addresses with endpoints, however some balancers such as weighted - // target do not forwarrd the corresponding correct endpoints down/split + // target do not forward the corresponding correct endpoints down/split // endpoints properly. Once all balancers correctly forward endpoints // down, can delete this else conditional. addrs = state.ResolverState.Addresses diff --git a/vendor/google.golang.org/grpc/balancer/rls/balancer.go b/vendor/google.golang.org/grpc/balancer/rls/balancer.go index 3ac28271618bb..5ae4d2e131670 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/rls/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" @@ -77,6 +78,42 @@ var ( clientConnUpdateHook = func() {} dataCachePurgeHook = func() {} resetBackoffHook = func() {} + + cacheEntriesMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_entries", + Description: "EXPERIMENTAL. Number of entries in the RLS cache.", + Unit: "entry", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + cacheSizeMetric = estats.RegisterInt64Gauge(estats.MetricDescriptor{ + Name: "grpc.lb.rls.cache_size", + Description: "EXPERIMENTAL. The current size of the RLS cache.", + Unit: "By", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.instance_uuid"}, + Default: false, + }) + defaultTargetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.default_target_picks", + Description: "EXPERIMENTAL. 
Number of LB picks sent to the default target.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + targetPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.target_picks", + Description: "EXPERIMENTAL. Number of LB picks sent to each RLS target. Note that if the default target is also returned by the RLS server, RPCs sent to that target from the cache will be counted in this metric, not in grpc.rls.default_target_picks.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target", "grpc.lb.rls.data_plane_target", "grpc.lb.pick_result"}, + Default: false, + }) + failedPicksMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.rls.failed_picks", + Description: "EXPERIMENTAL. Number of LB picks failed due to either a failed RLS request or the RLS channel being throttled.", + Unit: "pick", + Labels: []string{"grpc.target", "grpc.lb.rls.server_target"}, + Default: false, + }) ) func init() { @@ -103,7 +140,7 @@ func (rlsBB) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer. updateCh: buffer.NewUnbounded(), } lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-experimental-lb %p] ", lb)) - lb.dataCache = newDataCache(maxCacheSize, lb.logger) + lb.dataCache = newDataCache(maxCacheSize, lb.logger, opts.MetricsRecorder, opts.Target.String()) lb.bg = balancergroup.New(balancergroup.Options{ CC: cc, BuildOpts: opts, @@ -285,27 +322,27 @@ func (b *rlsBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error // Update the copy of the config in the LB policy before releasing the lock. b.lbCfg = newCfg - - // Enqueue an event which will notify us when the above update has been - // propagated to all child policies, and the child policies have all - // processed their updates, and we have sent a picker update. - done := make(chan struct{}) - b.updateCh.Put(resumePickerUpdates{done: done}) b.stateMu.Unlock() - <-done + // We cannot do cache operations above because `cacheMu` needs to be grabbed + // before `stateMu` if we are to hold both locks at the same time. + b.cacheMu.Lock() + b.dataCache.updateRLSServerTarget(newCfg.lookupService) if resizeCache { // If the new config changes reduces the size of the data cache, we // might have to evict entries to get the cache size down to the newly - // specified size. - // - // And we cannot do this operation above (where we compute the - // `resizeCache` boolean) because `cacheMu` needs to be grabbed before - // `stateMu` if we are to hold both locks at the same time. - b.cacheMu.Lock() + // specified size. If we do evict an entry with valid backoff timer, + // the new picker needs to be sent to the channel to re-process any + // RPCs queued as a result of this backoff timer. b.dataCache.resize(newCfg.cacheSizeBytes) - b.cacheMu.Unlock() } + b.cacheMu.Unlock() + // Enqueue an event which will notify us when the above update has been + // propagated to all child policies, and the child policies have all + // processed their updates, and we have sent a picker update. 
+ done := make(chan struct{}) + b.updateCh.Put(resumePickerUpdates{done: done}) + <-done return nil } @@ -490,15 +527,19 @@ func (b *rlsBalancer) sendNewPickerLocked() { if b.defaultPolicy != nil { b.defaultPolicy.acquireRef() } + picker := &rlsPicker{ - kbm: b.lbCfg.kbMap, - origEndpoint: b.bopts.Target.Endpoint(), - lb: b, - defaultPolicy: b.defaultPolicy, - ctrlCh: b.ctrlCh, - maxAge: b.lbCfg.maxAge, - staleAge: b.lbCfg.staleAge, - bg: b.bg, + kbm: b.lbCfg.kbMap, + origEndpoint: b.bopts.Target.Endpoint(), + lb: b, + defaultPolicy: b.defaultPolicy, + ctrlCh: b.ctrlCh, + maxAge: b.lbCfg.maxAge, + staleAge: b.lbCfg.staleAge, + bg: b.bg, + rlsServerTarget: b.lbCfg.lookupService, + grpcTarget: b.bopts.Target.String(), + metricsRecorder: b.bopts.MetricsRecorder, } picker.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[rls-picker %p] ", picker)) state := balancer.State{ diff --git a/vendor/google.golang.org/grpc/balancer/rls/cache.go b/vendor/google.golang.org/grpc/balancer/rls/cache.go index d7a6a1a436c6f..7fe796c9587a4 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/cache.go +++ b/vendor/google.golang.org/grpc/balancer/rls/cache.go @@ -22,6 +22,8 @@ import ( "container/list" "time" + "github.com/google/uuid" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/backoff" internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" @@ -47,7 +49,7 @@ type cacheEntry struct { // headerData is received in the RLS response and is to be sent in the // X-Google-RLS-Data header for matching RPCs. headerData string - // expiryTime is the absolute time at which this cache entry entry stops + // expiryTime is the absolute time at which this cache entry stops // being valid. When an RLS request succeeds, this is set to the current // time plus the max_age field from the LB policy config. expiryTime time.Time @@ -163,24 +165,39 @@ func (l *lru) getLeastRecentlyUsed() cacheKey { // // It is not safe for concurrent access. type dataCache struct { - maxSize int64 // Maximum allowed size. - currentSize int64 // Current size. - keys *lru // Cache keys maintained in lru order. - entries map[cacheKey]*cacheEntry - logger *internalgrpclog.PrefixLogger - shutdown *grpcsync.Event + maxSize int64 // Maximum allowed size. + currentSize int64 // Current size. + keys *lru // Cache keys maintained in lru order. + entries map[cacheKey]*cacheEntry + logger *internalgrpclog.PrefixLogger + shutdown *grpcsync.Event + rlsServerTarget string + + // Read only after initialization. + grpcTarget string + uuid string + metricsRecorder estats.MetricsRecorder } -func newDataCache(size int64, logger *internalgrpclog.PrefixLogger) *dataCache { +func newDataCache(size int64, logger *internalgrpclog.PrefixLogger, metricsRecorder estats.MetricsRecorder, grpcTarget string) *dataCache { return &dataCache{ - maxSize: size, - keys: newLRU(), - entries: make(map[cacheKey]*cacheEntry), - logger: logger, - shutdown: grpcsync.NewEvent(), + maxSize: size, + keys: newLRU(), + entries: make(map[cacheKey]*cacheEntry), + logger: logger, + shutdown: grpcsync.NewEvent(), + grpcTarget: grpcTarget, + uuid: uuid.New().String(), + metricsRecorder: metricsRecorder, } } +// updateRLSServerTarget updates the RLS Server Target the RLS Balancer is +// configured with. +func (dc *dataCache) updateRLSServerTarget(rlsServerTarget string) { + dc.rlsServerTarget = rlsServerTarget +} + // resize changes the maximum allowed size of the data cache. 
// // The return value indicates if an entry with a valid backoff timer was @@ -223,7 +240,7 @@ func (dc *dataCache) resize(size int64) (backoffCancelled bool) { backoffCancelled = true } } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } dc.maxSize = size return backoffCancelled @@ -249,7 +266,7 @@ func (dc *dataCache) evictExpiredEntries() bool { if entry.expiryTime.After(now) || entry.backoffExpiryTime.After(now) { continue } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) evicted = true } return evicted @@ -310,6 +327,8 @@ func (dc *dataCache) addEntry(key cacheKey, entry *cacheEntry) (backoffCancelled if dc.currentSize > dc.maxSize { backoffCancelled = dc.resize(dc.maxSize) } + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) return backoffCancelled, true } @@ -319,6 +338,7 @@ func (dc *dataCache) updateEntrySize(entry *cacheEntry, newSize int64) { dc.currentSize -= entry.size entry.size = newSize dc.currentSize += entry.size + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) } func (dc *dataCache) getEntry(key cacheKey) *cacheEntry { @@ -339,7 +359,7 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { if !ok { return } - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } // deleteAndCleanup performs actions required at the time of deleting an entry @@ -347,15 +367,17 @@ func (dc *dataCache) removeEntryForTesting(key cacheKey) { // - the entry is removed from the map of entries // - current size of the data cache is update // - the key is removed from the LRU -func (dc *dataCache) deleteAndcleanup(key cacheKey, entry *cacheEntry) { +func (dc *dataCache) deleteAndCleanup(key cacheKey, entry *cacheEntry) { delete(dc.entries, key) dc.currentSize -= entry.size dc.keys.removeEntry(key) + cacheSizeMetric.Record(dc.metricsRecorder, dc.currentSize, dc.grpcTarget, dc.rlsServerTarget, dc.uuid) + cacheEntriesMetric.Record(dc.metricsRecorder, int64(len(dc.entries)), dc.grpcTarget, dc.rlsServerTarget, dc.uuid) } func (dc *dataCache) stop() { for key, entry := range dc.entries { - dc.deleteAndcleanup(key, entry) + dc.deleteAndCleanup(key, entry) } dc.shutdown.Fire() } diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go index 13b316b7fa233..1ab874c356fef 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/adaptive/lookback.go @@ -82,10 +82,3 @@ func (l *lookback) advance(t time.Time) int64 { l.head = nh return nh } - -func min(x int64, y int64) int64 { - if x < y { - return x - } - return y -} diff --git a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go index d010f74456fed..cc5ce510ad902 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go +++ b/vendor/google.golang.org/grpc/balancer/rls/internal/keys/builder.go @@ -218,7 +218,7 @@ type matcher struct { names []string } -// Equal reports if m and are are equivalent headerKeys. +// Equal reports if m and a are equivalent headerKeys. 
func (m matcher) Equal(a matcher) bool { if m.key != a.key { return false diff --git a/vendor/google.golang.org/grpc/balancer/rls/picker.go b/vendor/google.golang.org/grpc/balancer/rls/picker.go index 8f617a4e42e01..e5c86f2906875 100644 --- a/vendor/google.golang.org/grpc/balancer/rls/picker.go +++ b/vendor/google.golang.org/grpc/balancer/rls/picker.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/balancer/rls/internal/keys" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" internalgrpclog "google.golang.org/grpc/internal/grpclog" rlspb "google.golang.org/grpc/internal/proto/grpc_lookup_v1" "google.golang.org/grpc/metadata" @@ -61,12 +62,15 @@ type rlsPicker struct { // The picker is given its own copy of the below fields from the RLS LB policy // to avoid having to grab the mutex on the latter. - defaultPolicy *childPolicyWrapper // Child policy for the default target. - ctrlCh *controlChannel // Control channel to the RLS server. - maxAge time.Duration // Cache max age from LB config. - staleAge time.Duration // Cache stale age from LB config. - bg exitIdler - logger *internalgrpclog.PrefixLogger + rlsServerTarget string + grpcTarget string + metricsRecorder estats.MetricsRecorder + defaultPolicy *childPolicyWrapper // Child policy for the default target. + ctrlCh *controlChannel // Control channel to the RLS server. + maxAge time.Duration // Cache max age from LB config. + staleAge time.Duration // Cache stale age from LB config. + bg exitIdler + logger *internalgrpclog.PrefixLogger } // isFullMethodNameValid return true if name is of the form `/service/method`. @@ -85,7 +89,17 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { reqKeys := p.kbm.RLSKey(md, p.origEndpoint, info.FullMethodName) p.lb.cacheMu.Lock() - defer p.lb.cacheMu.Unlock() + var pr balancer.PickResult + var err error + + // Record metrics without the cache mutex held, to prevent lock contention + // between concurrent RPC's and their Pick calls. Metrics Recording can + // potentially be expensive. + metricsCallback := func() {} + defer func() { + p.lb.cacheMu.Unlock() + metricsCallback() + }() // Lookup data cache and pending request map using request path and keys. cacheKey := cacheKey{path: info.FullMethodName, keys: reqKeys.Str} @@ -98,7 +112,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { case dcEntry == nil && pendingEntry == nil: throttled := p.sendRouteLookupRequestLocked(cacheKey, &backoffState{bs: defaultBackoffStrategy}, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") if throttled { - return p.useDefaultPickIfPossible(info, errRLSThrottled) + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) + return pr, err } return balancer.PickResult{}, balancer.ErrNoSubConnAvailable @@ -113,8 +128,8 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_STALE, dcEntry.headerData) } // Delegate to child policies. - res, err := p.delegateToChildPoliciesLocked(dcEntry, info) - return res, err + pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) + return pr, err } // We get here only if the data cache entry has expired. 
If entry is in @@ -126,67 +141,108 @@ func (p *rlsPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // message received from the control plane is still fine, as it could be // useful for debugging purposes. st := dcEntry.status - return p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, status.Error(codes.Unavailable, fmt.Sprintf("most recent error from RLS server: %v", st.Error()))) + return pr, err } // We get here only if the entry has expired and is not in backoff. throttled := p.sendRouteLookupRequestLocked(cacheKey, dcEntry.backoffState, reqKeys.Map, rlspb.RouteLookupRequest_REASON_MISS, "") if throttled { - return p.useDefaultPickIfPossible(info, errRLSThrottled) + pr, metricsCallback, err = p.useDefaultPickIfPossible(info, errRLSThrottled) + return pr, err } return balancer.PickResult{}, balancer.ErrNoSubConnAvailable // Data cache hit. Pending request exists. default: if dcEntry.expiryTime.After(now) { - res, err := p.delegateToChildPoliciesLocked(dcEntry, info) - return res, err + pr, metricsCallback, err = p.delegateToChildPoliciesLocked(dcEntry, info) + return pr, err } // Data cache entry has expired and pending request exists. Queue pick. return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } } +// errToPickResult is a helper function which converts the error value returned +// by Pick() to a string that represents the pick result. +func errToPickResult(err error) string { + if err == nil { + return "complete" + } + if errors.Is(err, balancer.ErrNoSubConnAvailable) { + return "queue" + } + if _, ok := status.FromError(err); ok { + return "drop" + } + return "fail" +} + // delegateToChildPoliciesLocked is a helper function which iterates through the // list of child policy wrappers in a cache entry and attempts to find a child // policy to which this RPC can be routed to. If all child policies are in -// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. -func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, error) { +// TRANSIENT_FAILURE, we delegate to the last child policy arbitrarily. Returns +// a function to be invoked to record metrics. +func (p *rlsPicker) delegateToChildPoliciesLocked(dcEntry *cacheEntry, info balancer.PickInfo) (balancer.PickResult, func(), error) { const rlsDataHeaderName = "x-google-rls-data" for i, cpw := range dcEntry.childPolicyWrappers { state := (*balancer.State)(atomic.LoadPointer(&cpw.state)) // Delegate to the child policy if it is not in TRANSIENT_FAILURE, or if // it is the last one (which handles the case of delegating to the last - // child picker if all child polcies are in TRANSIENT_FAILURE). + // child picker if all child policies are in TRANSIENT_FAILURE). if state.ConnectivityState != connectivity.TransientFailure || i == len(dcEntry.childPolicyWrappers)-1 { // Any header data received from the RLS server is stored in the // cache entry and needs to be sent to the actual backend in the // X-Google-RLS-Data header. res, err := state.Picker.Pick(info) if err != nil { - return res, err + pr := errToPickResult(err) + return res, func() { + if pr == "queue" { + // Don't record metrics for queued Picks. 
+ return + } + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, pr) + }, err } + if res.Metadata == nil { res.Metadata = metadata.Pairs(rlsDataHeaderName, dcEntry.headerData) } else { res.Metadata.Append(rlsDataHeaderName, dcEntry.headerData) } - return res, nil + return res, func() { + targetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, cpw.target, "complete") + }, nil } } + // In the unlikely event that we have a cache entry with no targets, we end up // queueing the RPC. - return balancer.PickResult{}, balancer.ErrNoSubConnAvailable + return balancer.PickResult{}, func() {}, balancer.ErrNoSubConnAvailable } // useDefaultPickIfPossible is a helper method which delegates to the default -// target if one is configured, or fails the pick with the given error. -func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, error) { +// target if one is configured, or fails the pick with the given error. Returns +// a function to be invoked to record metrics. +func (p *rlsPicker) useDefaultPickIfPossible(info balancer.PickInfo, errOnNoDefault error) (balancer.PickResult, func(), error) { if p.defaultPolicy != nil { state := (*balancer.State)(atomic.LoadPointer(&p.defaultPolicy.state)) - return state.Picker.Pick(info) + res, err := state.Picker.Pick(info) + pr := errToPickResult(err) + return res, func() { + if pr == "queue" { + // Don't record metrics for queued Picks. + return + } + defaultTargetPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget, p.defaultPolicy.target, pr) + }, err } - return balancer.PickResult{}, errOnNoDefault + + return balancer.PickResult{}, func() { + failedPicksMetric.Record(p.metricsRecorder, 1, p.grpcTarget, p.rlsServerTarget) + }, errOnNoDefault } // sendRouteLookupRequestLocked adds an entry to the pending request map and diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go index 36606e79e4442..88bf64ec4ec49 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/balancer.go @@ -32,7 +32,9 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/balancer/weightedroundrobin/internal" + "google.golang.org/grpc/balancer/weightedtarget" "google.golang.org/grpc/connectivity" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal/grpclog" iserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/orca" @@ -45,6 +47,43 @@ import ( // Name is the name of the weighted round robin balancer. const Name = "weighted_round_robin" +var ( + rrFallbackMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.rr_fallback", + Description: "EXPERIMENTAL. Number of scheduler updates in which there were not enough endpoints with valid weight, which caused the WRR policy to fall back to RR behavior.", + Unit: "update", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + + endpointWeightNotYetUsableMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weight_not_yet_usable", + Description: "EXPERIMENTAL. 
Number of endpoints from each scheduler update that don't yet have usable weight information (i.e., either the load report has not yet been received, or it is within the blackout period).", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + + endpointWeightStaleMetric = estats.RegisterInt64Count(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weight_stale", + Description: "EXPERIMENTAL. Number of endpoints from each scheduler update whose latest weight is older than the expiration period.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) + endpointWeightsMetric = estats.RegisterFloat64Histo(estats.MetricDescriptor{ + Name: "grpc.lb.wrr.endpoint_weights", + Description: "EXPERIMENTAL. Weight of each endpoint, recorded on every scheduler update. Endpoints without usable weights will be recorded as weight 0.", + Unit: "endpoint", + Labels: []string{"grpc.target"}, + OptionalLabels: []string{"grpc.lb.locality"}, + Default: false, + }) +) + func init() { balancer.Register(bb{}) } @@ -58,7 +97,10 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba csEvltr: &balancer.ConnectivityStateEvaluator{}, scMap: make(map[balancer.SubConn]*weightedSubConn), connectivityState: connectivity.Connecting, + target: bOpts.Target.String(), + metricsRecorder: bOpts.MetricsRecorder, } + b.logger = prefixLogger(b) b.logger.Infof("Created") return b @@ -101,8 +143,11 @@ func (bb) Name() string { // wrrBalancer implements the weighted round robin LB policy. type wrrBalancer struct { - cc balancer.ClientConn - logger *grpclog.PrefixLogger + // The following fields are immutable. + cc balancer.ClientConn + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder // The following fields are only accessed on calls into the LB policy, and // do not need a mutex. @@ -114,6 +159,7 @@ type wrrBalancer struct { resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure stopPicker func() + locality string } func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { @@ -125,6 +171,7 @@ func (b *wrrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error } b.cfg = cfg + b.locality = weightedtarget.LocalityFromResolverState(ccs.ResolverState) b.updateAddresses(ccs.ResolverState.Addresses) if len(ccs.ResolverState.Addresses) == 0 { @@ -171,6 +218,10 @@ func (b *wrrBalancer) updateAddresses(addrs []resolver.Address) { // Initially, we set load reports to off, because they are not // running upon initial weightedSubConn creation. 
cfg: &lbConfig{EnableOOBLoadReport: false}, + + metricsRecorder: b.metricsRecorder, + target: b.target, + locality: b.locality, } b.subConns.Set(addr, wsc) b.scMap[sc] = wsc @@ -318,9 +369,12 @@ func (b *wrrBalancer) regeneratePicker() { } p := &picker{ - v: rand.Uint32(), // start the scheduler at a random point - cfg: b.cfg, - subConns: b.readySubConns(), + v: rand.Uint32(), // start the scheduler at a random point + cfg: b.cfg, + subConns: b.readySubConns(), + metricsRecorder: b.metricsRecorder, + locality: b.locality, + target: b.target, } var ctx context.Context ctx, b.stopPicker = context.WithCancel(context.Background()) @@ -339,16 +393,20 @@ type picker struct { v uint32 // incrementing value used by the scheduler; accessed atomically cfg *lbConfig // active config when picker created subConns []*weightedSubConn // all READY subconns + + // The following fields are immutable. + target string + locality string + metricsRecorder estats.MetricsRecorder } -// scWeights returns a slice containing the weights from p.subConns in the same -// order as p.subConns. -func (p *picker) scWeights() []float64 { +func (p *picker) scWeights(recordMetrics bool) []float64 { ws := make([]float64, len(p.subConns)) now := internal.TimeNow() for i, wsc := range p.subConns { - ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod)) + ws[i] = wsc.weight(now, time.Duration(p.cfg.WeightExpirationPeriod), time.Duration(p.cfg.BlackoutPeriod), recordMetrics) } + return ws } @@ -357,7 +415,7 @@ func (p *picker) inc() uint32 { } func (p *picker) regenerateScheduler() { - s := newScheduler(p.scWeights(), p.inc) + s := p.newScheduler(true) atomic.StorePointer(&p.scheduler, unsafe.Pointer(&s)) } @@ -367,6 +425,7 @@ func (p *picker) start(ctx context.Context) { // No need to regenerate weights with only one backend. return } + go func() { ticker := time.NewTicker(time.Duration(p.cfg.WeightUpdatePeriod)) defer ticker.Stop() @@ -381,7 +440,7 @@ func (p *picker) start(ctx context.Context) { }() } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { // Read the scheduler atomically. All scheduler operations are threadsafe, // and if the scheduler is replaced during this usage, we want to use the // scheduler that was live when the pick started. @@ -404,8 +463,12 @@ func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // When needed, it also tracks connectivity state, listens for metrics updates // by implementing the orca.OOBListener interface and manages that listener. type weightedSubConn struct { + // The following fields are immutable. balancer.SubConn - logger *grpclog.PrefixLogger + logger *grpclog.PrefixLogger + target string + metricsRecorder estats.MetricsRecorder + locality string // The following fields are only accessed on calls into the LB policy, and // do not need a mutex. @@ -450,7 +513,7 @@ func (w *weightedSubConn) OnLoadReport(load *v3orcapb.OrcaLoadReport) { } w.lastUpdated = internal.TimeNow() - if w.nonEmptySince == (time.Time{}) { + if w.nonEmptySince.Equal(time.Time{}) { w.nonEmptySince = w.lastUpdated } } @@ -495,14 +558,17 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect w.SubConn.Connect() case connectivity.Ready: // If we transition back to READY state, reset nonEmptySince so that we - // apply the blackout period after we start receiving load data. 
Note - // that we cannot guarantee that we will never receive lingering - // callbacks for backend metric reports from the previous connection - // after the new connection has been established, but they should be - // masked by new backend metric reports from the new connection by the - // time the blackout period ends. + // apply the blackout period after we start receiving load data. Also + // reset lastUpdated to trigger endpoint weight not yet usable in the + // case endpoint gets asked what weight it is before receiving a new + // load report. Note that we cannot guarantee that we will never receive + // lingering callbacks for backend metric reports from the previous + // connection after the new connection has been established, but they + // should be masked by new backend metric reports from the new + // connection by the time the blackout period ends. w.mu.Lock() w.nonEmptySince = time.Time{} + w.lastUpdated = time.Time{} w.mu.Unlock() case connectivity.Shutdown: if w.stopORCAListener != nil { @@ -527,21 +593,44 @@ func (w *weightedSubConn) updateConnectivityState(cs connectivity.State) connect // weight returns the current effective weight of the subconn, taking into // account the parameters. Returns 0 for blacked out or expired data, which -// will cause the backend weight to be treated as the mean of the weights of -// the other backends. -func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration) float64 { +// will cause the backend weight to be treated as the mean of the weights of the +// other backends. If forScheduler is set to true, this function will emit +// metrics through the metrics registry. +func (w *weightedSubConn) weight(now time.Time, weightExpirationPeriod, blackoutPeriod time.Duration, recordMetrics bool) (weight float64) { w.mu.Lock() defer w.mu.Unlock() + + if recordMetrics { + defer func() { + endpointWeightsMetric.Record(w.metricsRecorder, weight, w.target, w.locality) + }() + } + + // The SubConn has not received a load report (i.e. just turned READY with + // no load report). + if w.lastUpdated.Equal(time.Time{}) { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + return 0 + } + // If the most recent update was longer ago than the expiration period, // reset nonEmptySince so that we apply the blackout period again if we // start getting data again in the future, and return 0. if now.Sub(w.lastUpdated) >= weightExpirationPeriod { + if recordMetrics { + endpointWeightStaleMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } w.nonEmptySince = time.Time{} return 0 } + // If we don't have at least blackoutPeriod worth of data, return 0. 
- if blackoutPeriod != 0 && (w.nonEmptySince == (time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + if blackoutPeriod != 0 && (w.nonEmptySince.Equal(time.Time{}) || now.Sub(w.nonEmptySince) < blackoutPeriod) { + if recordMetrics { + endpointWeightNotYetUsableMetric.Record(w.metricsRecorder, 1, w.target, w.locality) + } return 0 } + return w.weightVal } diff --git a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go index f389678b4e825..56aa15da10d27 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go +++ b/vendor/google.golang.org/grpc/balancer/weightedroundrobin/scheduler.go @@ -31,13 +31,17 @@ type scheduler interface { // len(scWeights)-1 are zero or there is only a single subconn, otherwise it // will return an Earliest Deadline First (EDF) scheduler implementation that // selects the subchannels according to their weights. -func newScheduler(scWeights []float64, inc func() uint32) scheduler { +func (p *picker) newScheduler(recordMetrics bool) scheduler { + scWeights := p.scWeights(recordMetrics) n := len(scWeights) if n == 0 { return nil } if n == 1 { - return &rrScheduler{numSCs: 1, inc: inc} + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: 1, inc: p.inc} } sum := float64(0) numZero := 0 @@ -51,8 +55,12 @@ func newScheduler(scWeights []float64, inc func() uint32) scheduler { numZero++ } } + if numZero >= n-1 { - return &rrScheduler{numSCs: uint32(n), inc: inc} + if recordMetrics { + rrFallbackMetric.Record(p.metricsRecorder, 1, p.target, p.locality) + } + return &rrScheduler{numSCs: uint32(n), inc: p.inc} } unscaledMean := sum / float64(n-numZero) scalingFactor := maxWeight / max @@ -74,11 +82,11 @@ func newScheduler(scWeights []float64, inc func() uint32) scheduler { } if allEqual { - return &rrScheduler{numSCs: uint32(n), inc: inc} + return &rrScheduler{numSCs: uint32(n), inc: p.inc} } logger.Infof("using edf scheduler with weights: %v", weights) - return &edfScheduler{weights: weights, inc: inc} + return &edfScheduler{weights: weights, inc: p.inc} } const maxWeight = math.MaxUint16 diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go index 27279257ed138..bcc8aca8b4912 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedaggregator/aggregator.go @@ -89,7 +89,7 @@ func New(cc balancer.ClientConn, logger *grpclog.PrefixLogger, newWRR func() wrr } // Start starts the aggregator. It can be called after Stop to restart the -// aggretator. +// aggregator. 
func (wbsa *Aggregator) Start() { wbsa.mu.Lock() defer wbsa.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go index 220f4e555674a..dfd1ef26dcb02 100644 --- a/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go +++ b/vendor/google.golang.org/grpc/balancer/weightedtarget/weightedtarget.go @@ -84,6 +84,17 @@ type weightedTargetBalancer struct { targets map[string]Target } +type localityKeyType string + +const localityKey = localityKeyType("locality") + +// LocalityFromResolverState returns the locality from the resolver.State +// provided, or an empty string if not present. +func LocalityFromResolverState(state resolver.State) string { + locality, _ := state.Attributes.Value(localityKey).(string) + return locality +} + // UpdateClientConnState takes the new targets in balancer group, // creates/deletes sub-balancers and sends them update. addresses are split into // groups based on hierarchy path. @@ -142,7 +153,7 @@ func (b *weightedTargetBalancer) UpdateClientConnState(s balancer.ClientConnStat ResolverState: resolver.State{ Addresses: addressesSplit[name], ServiceConfig: s.ResolverState.ServiceConfig, - Attributes: s.ResolverState.Attributes, + Attributes: s.ResolverState.Attributes.WithValue(localityKey, name), }, BalancerConfig: newT.ChildPolicy.Config, }) diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index 4161fdf47a8b1..8ad6ce2f0950a 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -25,12 +25,15 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) +var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the @@ -79,6 +82,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, + MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -92,7 +96,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { // it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { errCh := make(chan error) - ok := ccb.serializer.Schedule(func(ctx context.Context) { + uccs := func(ctx context.Context) { defer close(errCh) if ctx.Err() != nil || ccb.balancer == nil { return @@ -107,17 +111,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat logger.Infof("error from balancer.UpdateClientConnState: %v", err) } errCh <- err - }) - if !ok { - return nil } + onFailure := func() { close(errCh) } + + // UpdateClientConnState can race with Close, and when the latter wins, the + // serializer is closed, and the attempt to schedule the callback will fail. + // It is acceptable to ignore this failure. 
But since we want to handle the + // state update in a blocking fashion (when we successfully schedule the + // callback), we have to use the ScheduleOr method and not the MaybeSchedule + // method on the serializer. + ccb.serializer.ScheduleOr(uccs, onFailure) return <-errCh } // resolverError is invoked by grpc to push a resolver error to the underlying // balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -133,7 +143,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.closed = true ccb.mu.Unlock() channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") - ccb.serializer.Schedule(func(context.Context) { + ccb.serializer.TrySchedule(func(context.Context) { if ccb.balancer == nil { return } @@ -145,7 +155,7 @@ func (ccb *ccBalancerWrapper) close() { // exitIdle invokes the balancer's exitIdle method in the serializer. func (ccb *ccBalancerWrapper) exitIdle() { - ccb.serializer.Schedule(func(ctx context.Context) { + ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccb.balancer == nil { return } @@ -182,7 +192,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return acbw, nil } -func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { +func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) { // The graceful switch balancer will never call this. logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } @@ -252,15 +262,29 @@ type acBalancerWrapper struct { // updateState is invoked by grpc to push a subConn state update to the // underlying balancer. -func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { - acbw.ccb.serializer.Schedule(func(ctx context.Context) { +func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return } // Even though it is optional for balancers, gracefulswitch ensures // opts.StateListener is set, so this cannot ever be nil. // TODO: delete this comment when UpdateSubConnState is removed. - acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err} + if s == connectivity.Ready { + setConnectedAddress(&scs, curAddr) + } + acbw.stateListener(scs) + acbw.ac.mu.Lock() + defer acbw.ac.mu.Unlock() + if s == connectivity.Ready { + // When changing states to READY, reset stateReadyChan. Wait until + // after we notify the LB policy's listener(s) in order to prevent + // ac.getTransport() from unblocking before the LB policy starts + // tracking the subchannel as READY. + close(acbw.ac.stateReadyChan) + acbw.ac.stateReadyChan = make(chan struct{}) + } }) } @@ -318,8 +342,8 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( pData := acbw.producers[pb] if pData == nil { // Not found; create a new one and add it to the producers map. - p, close := pb.Build(acbw) - pData = &refCountedProducer{producer: p, close: close} + p, closeFn := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: closeFn} acbw.producers[pb] = pData } // Account for this new reference. 
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 63c639e4fe933..55bffaa77ef0f 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,8 +18,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 @@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ +var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type @@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*GrpcLogEntry); i { case 0: return &v.state @@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*ClientHeader); i { case 0: return &v.state @@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*ServerHeader); i { case 0: return &v.state @@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*Trailer); i { case 0: return &v.state @@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*Message); i { case 0: return &v.state @@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*Metadata); i { case 0: return &v.state @@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*MetadataEntry); i { case 0: return &v.state @@ -1142,7 +1142,7 @@ 
func file_grpc_binlog_v1_binarylog_proto_init() { return nil } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*Address); i { case 0: return &v.state @@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { } } } - file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{ (*GrpcLogEntry_ClientHeader)(nil), (*GrpcLogEntry_ServerHeader)(nil), (*GrpcLogEntry_Message)(nil), diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 423be7b43b00c..9c8850e3fdd5b 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,6 +24,7 @@ import ( "fmt" "math" "net/url" + "slices" "strings" "sync" "sync/atomic" @@ -39,6 +40,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/idle" iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers) + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil } @@ -590,13 +595,14 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). - authority string // See initAuthority(). - dopts dialOptions // Default and user specified dial options. - channelz *channelz.Channel // Channelz object. - resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). - idlenessMgr *idle.Manager + target string // User's dial target. + parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder(). + authority string // See initAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder(). + idlenessMgr *idle.Manager + metricsRecorderList *stats.MetricsRecorderList // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -626,11 +632,6 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. 
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { ch := cc.csMgr.getNotifyChan() if cc.csMgr.getState() != sourceState { @@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec } // GetState returns the connectivity.State of ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a later -// release. func (cc *ClientConn) GetState() connectivity.State { return cc.csMgr.getState() } @@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { cc.csMgr.updateState(connectivity.TransientFailure) } -// Makes a copy of the input addresses slice and clears out the balancer -// attributes field. Addresses are passed during subconn creation and address -// update operations. In both cases, we will clear the balancer attributes by -// calling this function, and therefore we will be able to use the Equal method -// provided by the resolver.Address type for comparison. -func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { +// Makes a copy of the input addresses slice. Addresses are passed during +// subconn creation and address update operations. +func copyAddresses(in []resolver.Address) []resolver.Address { out := make([]resolver.Address, len(in)) - for i := range in { - out[i] = in[i] - out[i].BalancerAttributes = nil - } + copy(out, in) return out } @@ -835,14 +825,14 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. } ac := &addrConn{ - state: connectivity.Idle, - cc: cc, - addrs: copyAddressesWithoutBalancerAttributes(addrs), - scopts: opts, - dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz, ""), - resetBackoff: make(chan struct{}), - stateChan: make(chan struct{}), + state: connectivity.Idle, + cc: cc, + addrs: copyAddresses(addrs), + scopts: opts, + dopts: cc.dopts, + channelz: channelz.RegisterSubChannel(cc.channelz, ""), + resetBackoff: make(chan struct{}), + stateReadyChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Start with our address set to the first address; this may be updated if @@ -918,28 +908,29 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - ac.mu.Unlock() - ac.resetTransport() + ac.resetTransportAndUnlock() return nil } -func equalAddresses(a, b []resolver.Address) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if !v.Equal(b[i]) { - return false - } - } - return true +// equalAddressIgnoringBalAttributes returns true is a and b are considered equal. +// This is different from the Equal method on the resolver.Address type which +// considers all fields to determine equality. Here, we only consider fields +// that are meaningful to the subConn. +func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { + return a.Addr == b.Addr && a.ServerName == b.ServerName && + a.Attributes.Equal(b.Attributes) && + a.Metadata == b.Metadata +} + +func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool { + return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) }) } // updateAddrs updates ac.addrs with the new addresses list and handles active // connections or connection attempts. 
func (ac *addrConn) updateAddrs(addrs []resolver.Address) { - addrs = copyAddressesWithoutBalancerAttributes(addrs) + addrs = copyAddresses(addrs) limit := len(addrs) if limit > 5 { limit = 5 @@ -947,7 +938,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit]) ac.mu.Lock() - if equalAddresses(ac.addrs, addrs) { + if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) { ac.mu.Unlock() return } @@ -966,7 +957,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { // Try to find the connected address. for _, a := range addrs { a.ServerName = ac.cc.getServerName(a) - if a.Equal(ac.curAddr) { + if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) { // We are connected to a valid address, so do nothing but // update the addresses. ac.mu.Unlock() @@ -992,11 +983,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.updateConnectivityState(connectivity.Idle, nil) } - ac.mu.Unlock() - // Since we were connecting/connected, we should start a new connection // attempt. - go ac.resetTransport() + go ac.resetTransportAndUnlock() } // getServerName determines the serverName to be used in the connection @@ -1190,8 +1179,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State - stateChan chan struct{} // closed and recreated on every state change. + state connectivity.State + stateReadyChan chan struct{} // closed and recreated on every READY state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1204,9 +1193,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - // When changing states, reset the state change channel. - close(ac.stateChan) - ac.stateChan = make(chan struct{}) ac.state = s ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { @@ -1214,7 +1200,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.acbw.updateState(s, lastErr) + ac.acbw.updateState(s, ac.curAddr, lastErr) } // adjustParams updates parameters used to create transports upon @@ -1231,8 +1217,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { } } -func (ac *addrConn) resetTransport() { - ac.mu.Lock() +// resetTransportAndUnlock unconditionally connects the addrConn. +// +// ac.mu must be held by the caller, and this function will guarantee it is released. +func (ac *addrConn) resetTransportAndUnlock() { acCtx := ac.ctx if acCtx.Err() != nil { ac.mu.Unlock() @@ -1522,7 +1510,7 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { for ctx.Err() == nil { ac.mu.Lock() - t, state, sc := ac.transport, ac.state, ac.stateChan + t, state, sc := ac.transport, ac.state, ac.stateReadyChan ac.mu.Unlock() if state == connectivity.Ready { return t, nil @@ -1585,7 +1573,7 @@ func (ac *addrConn) tearDown(err error) { } else { // Hard close the transport when the channel is entering idle or is // being shutdown. In the case where the channel is being shutdown, - // closing of transports is also taken care of by cancelation of cc.ctx. 
+ // closing of transports is also taken care of by cancellation of cc.ctx. // But in the case where the channel is entering idle, we need to // explicitly close the transports here. Instead of distinguishing // between these two cases, it is simpler to close the transport diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 411e3dfd47ccd..e840858b77b18 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -21,18 +21,73 @@ package grpc import ( "google.golang.org/grpc/encoding" _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" + "google.golang.org/grpc/mem" ) -// baseCodec contains the functionality of both Codec and encoding.Codec, but -// omits the name/string, which vary between the two and are not needed for -// anything besides the registry in the encoding package. +// baseCodec captures the new encoding.CodecV2 interface without the Name +// function, allowing it to be implemented by older Codec and encoding.Codec +// implementations. The omitted Name function is only needed for the register in +// the encoding package and is not part of the core functionality. type baseCodec interface { - Marshal(v any) ([]byte, error) - Unmarshal(data []byte, v any) error + Marshal(v any) (mem.BufferSlice, error) + Unmarshal(data mem.BufferSlice, v any) error +} + +// getCodec returns an encoding.CodecV2 for the codec of the given name (if +// registered). Initially checks the V2 registry with encoding.GetCodecV2 and +// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry +// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge +// to turn it into an encoding.CodecV2. Returns nil otherwise. +func getCodec(name string) encoding.CodecV2 { + if codecV1 := encoding.GetCodec(name); codecV1 != nil { + return newCodecV1Bridge(codecV1) + } + + return encoding.GetCodecV2(name) +} + +func newCodecV0Bridge(c Codec) baseCodec { + return codecV0Bridge{codec: c} +} + +func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 { + return codecV1Bridge{ + codecV0Bridge: codecV0Bridge{codec: c}, + name: c.Name(), + } +} + +var _ baseCodec = codecV0Bridge{} + +type codecV0Bridge struct { + codec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error + } +} + +func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) { + data, err := c.codec.Marshal(v) + if err != nil { + return nil, err + } + return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil +} + +func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) { + return c.codec.Unmarshal(data.Materialize(), v) } -var _ baseCodec = Codec(nil) -var _ baseCodec = encoding.Codec(nil) +var _ encoding.CodecV2 = codecV1Bridge{} + +type codecV1Bridge struct { + codecV0Bridge + name string +} + +func (c codecV1Bridge) Name() string { + return c.name +} // Codec defines the interface gRPC uses to encode and decode messages. 
// Note that implementations of this interface must be thread safe; diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go index 43726e877b8b3..7e4bfee888615 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aeadrekey.go @@ -49,7 +49,7 @@ func (k KeySizeError) Error() string { // newRekeyAEAD creates a new instance of aes128gcm with rekeying. // The key argument should be 44 bytes, the first 32 bytes are used as a key -// for HKDF-expand and the remainining 12 bytes are used as a random mask for +// for HKDF-expand and the remaining 12 bytes are used as a random mask for // the counter. func newRekeyAEAD(key []byte) (*rekeyAEAD, error) { k := len(key) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go index 6a9035ea254f8..b5bbb5497aa39 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/aes128gcmrekey.go @@ -51,7 +51,7 @@ type aes128gcmRekey struct { // NewAES128GCMRekey creates an instance that uses aes128gcm with rekeying // for ALTS record. The key argument should be 44 bytes, the first 32 bytes -// are used as a key for HKDF-expand and the remainining 12 bytes are used +// are used as a key for HKDF-expand and the remaining 12 bytes are used // as a random mask for the counter. func NewAES128GCMRekey(side core.Side, key []byte) (ALTSRecordCrypto, error) { inCounter := NewInCounter(side, overflowLenAES128GCMRekey) diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go index 0d64fb37a1255..f1ea7bb208119 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/conn/record.go @@ -266,10 +266,3 @@ func (p *conn) Write(b []byte) (n int, err error) { } return n, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go index 6c867dd850153..50721f690acbf 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go @@ -128,7 +128,7 @@ type altsHandshaker struct { // NewClientHandshaker creates a core.Handshaker that performs a client-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. -func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { +func NewClientHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, @@ -141,7 +141,7 @@ func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, // NewServerHandshaker creates a core.Handshaker that performs a server-side // ALTS handshake by acting as a proxy between the peer and the ALTS handshaker // service in the metadata server. 
-func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { +func NewServerHandshaker(_ context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) { return &altsHandshaker{ stream: nil, conn: c, diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go index e1cdafb980cd8..b3af035907292 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/service/service.go @@ -34,8 +34,6 @@ var ( // to a corresponding connection to a hypervisor handshaker service // instance. hsConnMap = make(map[string]*grpc.ClientConn) - // hsDialer will be reassigned in tests. - hsDialer = grpc.Dial ) // Dial dials the handshake service in the hypervisor. If a connection has @@ -50,7 +48,7 @@ func Dial(hsAddress string) (*grpc.ClientConn, error) { // Create a new connection to the handshaker service. Note that // this connection stays open until the application is closed. var err error - hsConn, err = hsDialer(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) + hsConn, err = grpc.Dial(hsAddress, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 38cb5cf0d744c..b7de8f05b7637 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/altscontext.proto package grpc_gcp @@ -201,7 +201,7 @@ func file_grpc_gcp_altscontext_proto_rawDescGZIP() []byte { } var file_grpc_gcp_altscontext_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_altscontext_proto_goTypes = []interface{}{ +var file_grpc_gcp_altscontext_proto_goTypes = []any{ (*AltsContext)(nil), // 0: grpc.gcp.AltsContext nil, // 1: grpc.gcp.AltsContext.PeerAttributesEntry (SecurityLevel)(0), // 2: grpc.gcp.SecurityLevel @@ -225,7 +225,7 @@ func file_grpc_gcp_altscontext_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_altscontext_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*AltsContext); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 55fc7f65f10d4..79b5dad476c7f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -533,7 +533,7 @@ type StartServerHandshakeReq struct { // to handshake_parameters is the integer value of HandshakeProtocol enum. HandshakeParameters map[int32]*ServerHandshakeParameters `protobuf:"bytes,2,rep,name=handshake_parameters,json=handshakeParameters,proto3" json:"handshake_parameters,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Bytes in out_frames returned from the peer's HandshakerResp. It is possible - // that the peer's out_frames are split into multiple HandshakReq messages. + // that the peer's out_frames are split into multiple HandshakeReq messages. InBytes []byte `protobuf:"bytes,3,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` // (Optional) Local endpoint information of the connection to the client, // such as local IP address, port number, and network protocol. @@ -1071,7 +1071,7 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, - 0x22, 0xf6, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x22, 0xfb, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x5b, 0x0a, 0x1b, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, @@ -1108,139 +1108,140 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, - 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x19, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, - 0x73, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 
0x6e, 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0c, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, + 0x01, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xaf, + 0x01, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0x80, 0x01, 0x01, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0xa5, 0x04, 0x0a, 0x17, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, - 0x65, 0x71, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x14, 0x68, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 
0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, - 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, - 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, - 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, - 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, + 0x65, 0x71, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x68, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x72, 0x70, 0x63, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, + 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, + 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 
0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, + 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, + 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, + 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, + 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, + 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 
0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, + 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, + 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, + 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, + 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x1a, 0x6b, 0x0a, 0x18, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, - 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, - 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, - 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, - 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, - 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 
0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, - 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, - 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, - 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, - 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, - 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, - 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, - 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, - 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, 0x6d, 
0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, - 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, - 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, - 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, - 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, - 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, - 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, - 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, - 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, - 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, - 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, - 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, - 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, - 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 
0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, + 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, + 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, + 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, + 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, + 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, + 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, + 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, + 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, + 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, + 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, + 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, + 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, + 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1257,7 +1258,7 @@ func file_grpc_gcp_handshaker_proto_rawDescGZIP() []byte { var file_grpc_gcp_handshaker_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_grpc_gcp_handshaker_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var 
file_grpc_gcp_handshaker_proto_goTypes = []interface{}{ +var file_grpc_gcp_handshaker_proto_goTypes = []any{ (HandshakeProtocol)(0), // 0: grpc.gcp.HandshakeProtocol (NetworkProtocol)(0), // 1: grpc.gcp.NetworkProtocol (*Endpoint)(nil), // 2: grpc.gcp.Endpoint @@ -1313,7 +1314,7 @@ func file_grpc_gcp_handshaker_proto_init() { } file_grpc_gcp_transport_security_common_proto_init() if !protoimpl.UnsafeEnabled { - file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*Endpoint); i { case 0: return &v.state @@ -1325,7 +1326,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*Identity); i { case 0: return &v.state @@ -1337,7 +1338,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*StartClientHandshakeReq); i { case 0: return &v.state @@ -1349,7 +1350,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*ServerHandshakeParameters); i { case 0: return &v.state @@ -1361,7 +1362,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*StartServerHandshakeReq); i { case 0: return &v.state @@ -1373,7 +1374,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*NextHandshakeMessageReq); i { case 0: return &v.state @@ -1385,7 +1386,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*HandshakerReq); i { case 0: return &v.state @@ -1397,7 +1398,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[7].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResult); i { case 0: return &v.state @@ -1409,7 +1410,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[8].Exporter = func(v any, i int) any { switch v := v.(*HandshakerStatus); i { case 0: return &v.state @@ -1421,7 +1422,7 @@ func file_grpc_gcp_handshaker_proto_init() { return nil } } - file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_handshaker_proto_msgTypes[9].Exporter = func(v any, i int) any { switch v := v.(*HandshakerResp); i { case 0: 
return &v.state @@ -1434,12 +1435,12 @@ func file_grpc_gcp_handshaker_proto_init() { } } } - file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[1].OneofWrappers = []any{ (*Identity_ServiceAccount)(nil), (*Identity_Hostname)(nil), } - file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []interface{}{} - file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_grpc_gcp_handshaker_proto_msgTypes[3].OneofWrappers = []any{} + file_grpc_gcp_handshaker_proto_msgTypes[6].OneofWrappers = []any{ (*HandshakerReq_ClientStart)(nil), (*HandshakerReq_ServerStart)(nil), (*HandshakerReq_Next)(nil), diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index 358074b64946a..34443b1d2dcfd 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/gcp/handshaker.proto package grpc_gcp @@ -75,7 +75,7 @@ type HandshakerService_DoHandshakeClient = grpc.BidiStreamingClient[HandshakerRe // HandshakerServiceServer is the server API for HandshakerService service. // All implementations must embed UnimplementedHandshakerServiceServer -// for forward compatibility +// for forward compatibility. type HandshakerServiceServer interface { // Handshaker service accepts a stream of handshaker request, returning a // stream of handshaker response. Client is expected to send exactly one @@ -87,14 +87,18 @@ type HandshakerServiceServer interface { mustEmbedUnimplementedHandshakerServiceServer() } -// UnimplementedHandshakerServiceServer must be embedded to have forward compatible implementations. -type UnimplementedHandshakerServiceServer struct { -} +// UnimplementedHandshakerServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHandshakerServiceServer struct{} func (UnimplementedHandshakerServiceServer) DoHandshake(grpc.BidiStreamingServer[HandshakerReq, HandshakerResp]) error { return status.Errorf(codes.Unimplemented, "method DoHandshake not implemented") } func (UnimplementedHandshakerServiceServer) mustEmbedUnimplementedHandshakerServiceServer() {} +func (UnimplementedHandshakerServiceServer) testEmbeddedByValue() {} // UnsafeHandshakerServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to HandshakerServiceServer will @@ -104,6 +108,13 @@ type UnsafeHandshakerServiceServer interface { } func RegisterHandshakerServiceServer(s grpc.ServiceRegistrar, srv HandshakerServiceServer) { + // If the following call panics, it indicates UnimplementedHandshakerServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&HandshakerService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 18cc9cfbd5993..6956c14f6a987 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/gcp/transport_security_common.proto package grpc_gcp @@ -253,7 +253,7 @@ func file_grpc_gcp_transport_security_common_proto_rawDescGZIP() []byte { var file_grpc_gcp_transport_security_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_gcp_transport_security_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_gcp_transport_security_common_proto_goTypes = []interface{}{ +var file_grpc_gcp_transport_security_common_proto_goTypes = []any{ (SecurityLevel)(0), // 0: grpc.gcp.SecurityLevel (*RpcProtocolVersions)(nil), // 1: grpc.gcp.RpcProtocolVersions (*RpcProtocolVersions_Version)(nil), // 2: grpc.gcp.RpcProtocolVersions.Version @@ -274,7 +274,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions); i { case 0: return &v.state @@ -286,7 +286,7 @@ func file_grpc_gcp_transport_security_common_proto_init() { return nil } } - file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_gcp_transport_security_common_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RpcProtocolVersions_Version); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 82bee1443bfee..4c805c64462c9 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials { // NoSecurity. type insecureTC struct{} -func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { +func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) { return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil } diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index d475cbc0894c0..328b838ed1f6a 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -38,7 +38,7 @@ type TokenSource struct { } // GetRequestMetadata gets the request metadata as a map from a TokenSource. 
-func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (ts TokenSource) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { token, err := ts.Token() if err != nil { return nil, err @@ -127,7 +127,7 @@ func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } -func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (oa oauthAccess) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { ri, _ := credentials.RequestInfoFromContext(ctx) if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil { return nil, fmt.Errorf("unable to transfer oauthAccess PerRPCCredentials: %v", err) @@ -156,7 +156,7 @@ type serviceAccount struct { t *oauth2.Token } -func (s *serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { +func (s *serviceAccount) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) { s.mu.Lock() defer s.mu.Unlock() if !s.t.Valid() { diff --git a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go index 87528b4e23e74..a4b99e3d4a2e9 100644 --- a/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go +++ b/vendor/google.golang.org/grpc/credentials/tls/certprovider/store.go @@ -58,7 +58,7 @@ type wrappedProvider struct { // closedProvider always returns errProviderClosed error. type closedProvider struct{} -func (c closedProvider) KeyMaterial(ctx context.Context) (*KeyMaterial, error) { +func (c closedProvider) KeyMaterial(context.Context) (*KeyMaterial, error) { return nil, errProviderClosed } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index f5453d48a53f3..2b285beee376b 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -33,6 +33,7 @@ import ( "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" ) @@ -60,7 +61,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions - internal.WithRecvBufferPool = withRecvBufferPool + internal.WithBufferPool = withBufferPool } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -92,7 +93,6 @@ type dialOptions struct { defaultServiceConfigRawJSON *string resolvers []resolver.Builder idleTimeout time.Duration - recvBufferPool SharedBufferPool defaultScheme string maxCallAttempts int } @@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption { // WithKeepaliveParams returns a DialOption that specifies keepalive parameters // for the client transport. +// +// Keepalive is disabled by default. 
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { if kp.Time < internal.KeepaliveMinPingTime { logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) @@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions { WriteBufferSize: defaultWriteBufSize, UseProxy: true, UserAgent: grpcUA, + BufferPool: mem.DefaultBufferPool(), }, bs: internalbackoff.DefaultExponential, healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, - recvBufferPool: nopBufferPool{}, defaultScheme: "dns", maxCallAttempts: defaultMaxCallAttempts, } @@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption { }) } -// WithRecvBufferPool returns a DialOption that configures the ClientConn -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: WithStatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. -func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { - return withRecvBufferPool(bufferPool) -} - -func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { +func withBufferPool(bufferPool mem.BufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.recvBufferPool = bufferPool + o.copts.BufferPool = bufferPool }) } diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go index 0022859ad7465..e7b532b6f806f 100644 --- a/vendor/google.golang.org/grpc/doc.go +++ b/vendor/google.golang.org/grpc/doc.go @@ -16,7 +16,7 @@ * */ -//go:generate ./regenerate.sh +//go:generate ./scripts/regenerate.sh /* Package grpc implements an RPC system called gRPC. diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 5ebf88d7147f2..11d0ae142c429 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -94,7 +94,7 @@ type Codec interface { Name() string } -var registeredCodecs = make(map[string]Codec) +var registeredCodecs = make(map[string]any) // RegisterCodec registers the provided Codec for use with all gRPC clients and // servers. @@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) { // // The content-subtype is expected to be lowercase. func GetCodec(contentSubtype string) Codec { - return registeredCodecs[contentSubtype] + c, _ := registeredCodecs[contentSubtype].(Codec) + return c } diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go new file mode 100644 index 0000000000000..074c5e234a7b3 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package encoding + +import ( + "strings" + + "google.golang.org/grpc/mem" +) + +// CodecV2 defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a CodecV2's +// methods can be called from concurrent goroutines. +type CodecV2 interface { + // Marshal returns the wire format of v. The buffers in the returned + // [mem.BufferSlice] must have at least one reference each, which will be freed + // by gRPC when they are no longer needed. + Marshal(v any) (out mem.BufferSlice, err error) + // Unmarshal parses the wire format into v. Note that data will be freed as soon + // as this function returns. If the codec wishes to guarantee access to the data + // after this function, it must take its own reference that it frees when it is + // no longer needed. + Unmarshal(data mem.BufferSlice, v any) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and +// servers. +// +// The CodecV2 will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the CodecV2. This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodecV2 will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If both a Codec and CodecV2 are registered with the same name, the CodecV2 +// will be used. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodecV2(codec CodecV2) { + if codec == nil { + panic("cannot register a nil CodecV2") + } + if codec.Name() == "" { + panic("cannot register CodecV2 with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodecV2(contentSubtype string) CodecV2 { + c, _ := registeredCodecs[contentSubtype].(CodecV2) + return c +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 66d5cdf03ec58..ceec319dd2fb4 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -1,6 +1,6 @@ /* * - * Copyright 2018 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
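The new CodecV2 registry above parallels the existing Codec registry. Below is a hedged sketch of a CodecV2 backed by encoding/json that reuses only mem calls appearing in this patch (SliceBuffer, MaterializeToBuffer, ReadOnlyData, Free); the "json" content-subtype and the codec itself are illustrative, not part of the patch.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// jsonCodecV2 is a toy CodecV2 that encodes messages with encoding/json.
type jsonCodecV2 struct{}

func (jsonCodecV2) Marshal(v any) (mem.BufferSlice, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// Wrap the plain []byte in a single-element BufferSlice, the same way the
	// proto codec handles small messages.
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (jsonCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
	// Materialize the (possibly multi-buffer) payload before decoding,
	// mirroring the proto codec's Unmarshal.
	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
	defer buf.Free()
	return json.Unmarshal(buf.ReadOnlyData(), v)
}

func (jsonCodecV2) Name() string { return "json" }

func init() {
	// RegisterCodecV2 is not thread-safe and must run during initialization.
	encoding.RegisterCodecV2(jsonCodecV2{})
}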
@@ -24,6 +24,7 @@ import ( "fmt" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/protoadapt" ) @@ -32,28 +33,51 @@ import ( const Name = "proto" func init() { - encoding.RegisterCodec(codec{}) + encoding.RegisterCodecV2(&codecV2{}) } -// codec is a Codec implementation with protobuf. It is the default codec for gRPC. -type codec struct{} +// codec is a CodecV2 implementation with protobuf. It is the default codec for +// gRPC. +type codecV2 struct{} -func (codec) Marshal(v any) ([]byte, error) { +func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) { vv := messageV2Of(v) if vv == nil { - return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v) } - return proto.Marshal(vv) + size := proto.Size(vv) + if mem.IsBelowBufferPoolingThreshold(size) { + buf, err := proto.Marshal(vv) + if err != nil { + return nil, err + } + data = append(data, mem.SliceBuffer(buf)) + } else { + pool := mem.DefaultBufferPool() + buf := pool.Get(size) + if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil { + pool.Put(buf) + return nil, err + } + data = append(data, mem.NewBuffer(buf, pool)) + } + + return data, nil } -func (codec) Unmarshal(data []byte, v any) error { +func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) { vv := messageV2Of(v) if vv == nil { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) } - return proto.Unmarshal(data, vv) + buf := data.MaterializeToBuffer(mem.DefaultBufferPool()) + defer buf.Free() + // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not + // really possible without a major overhaul of the proto package, but the + // vtprotobuf library may be able to support this. + return proto.Unmarshal(buf.ReadOnlyData(), vv) } func messageV2Of(v any) proto.Message { @@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message { return nil } -func (codec) Name() string { +func (c *codecV2) Name() string { return Name } diff --git a/vendor/google.golang.org/grpc/experimental/experimental.go b/vendor/google.golang.org/grpc/experimental/experimental.go index de7f13a2210ef..7196926365052 100644 --- a/vendor/google.golang.org/grpc/experimental/experimental.go +++ b/vendor/google.golang.org/grpc/experimental/experimental.go @@ -28,38 +28,37 @@ package experimental import ( "google.golang.org/grpc" "google.golang.org/grpc/internal" + "google.golang.org/grpc/mem" ) -// WithRecvBufferPool returns a grpc.DialOption that configures the use of -// bufferPool for parsing incoming messages on a grpc.ClientConn. Depending on -// the application's workload, this could result in reduced memory allocation. +// WithBufferPool returns a grpc.DialOption that configures the use of bufferPool +// for parsing incoming messages on a grpc.ClientConn, and for temporary buffers +// when marshaling outgoing messages. By default, mem.DefaultBufferPool is used, +// and this option only exists to provide alternative buffer pool implementations +// to the client, such as more optimized size allocations etc. However, the +// default buffer pool is already tuned to account for many different use-cases. // -// If you are unsure about how to implement a memory pool but want to utilize -// one, begin with grpc.NewSharedBufferPool. 
-// -// Note: The shared buffer pool feature will not be active if any of the -// following options are used: WithStatsHandler, EnableTracing, or binary -// logging. In such cases, the shared buffer pool will be ignored. -// -// Note: It is not recommended to use the shared buffer pool when compression is -// enabled. -func WithRecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.DialOption { - return internal.WithRecvBufferPool.(func(grpc.SharedBufferPool) grpc.DialOption)(bufferPool) +// Note: The following options will interfere with the buffer pool because they +// require a fully materialized buffer instead of a sequence of buffers: +// EnableTracing, and binary logging. In such cases, materializing the buffer +// will generate a lot of garbage, reducing the overall benefit from using a +// pool. +func WithBufferPool(bufferPool mem.BufferPool) grpc.DialOption { + return internal.WithBufferPool.(func(mem.BufferPool) grpc.DialOption)(bufferPool) } -// RecvBufferPool returns a grpc.ServerOption that configures the server to use -// the provided shared buffer pool for parsing incoming messages. Depending on -// the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize -// one, begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the -// following options are used: StatsHandler, EnableTracing, or binary logging. -// In such cases, the shared buffer pool will be ignored. +// BufferPool returns a grpc.ServerOption that configures the server to use the +// provided buffer pool for parsing incoming messages and for temporary buffers +// when marshaling outgoing messages. By default, mem.DefaultBufferPool is used, +// and this option only exists to provide alternative buffer pool implementations +// to the server, such as more optimized size allocations etc. However, the +// default buffer pool is already tuned to account for many different use-cases. // -// Note: It is not recommended to use the shared buffer pool when compression is -// enabled. -func RecvBufferPool(bufferPool grpc.SharedBufferPool) grpc.ServerOption { - return internal.RecvBufferPool.(func(grpc.SharedBufferPool) grpc.ServerOption)(bufferPool) +// Note: The following options will interfere with the buffer pool because they +// require a fully materialized buffer instead of a sequence of buffers: +// EnableTracing, and binary logging. In such cases, materializing the buffer +// will generate a lot of garbage, reducing the overall benefit from using a +// pool. +func BufferPool(bufferPool mem.BufferPool) grpc.ServerOption { + return internal.BufferPool.(func(mem.BufferPool) grpc.ServerOption)(bufferPool) } diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go new file mode 100644 index 0000000000000..1d827dd5d9d41 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go @@ -0,0 +1,269 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
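WithBufferPool and BufferPool above replace the removed shared-receive-buffer options. A sketch of passing a pool to both a client and a server; mem.DefaultBufferPool() is already the default, so supplying it explicitly is purely illustrative, and the target address is a placeholder.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/experimental"
	"google.golang.org/grpc/mem"
)

func main() {
	// Any mem.BufferPool implementation could be supplied here; the default
	// pool is used only to make the plumbing explicit.
	pool := mem.DefaultBufferPool()

	srv := grpc.NewServer(experimental.BufferPool(pool))
	defer srv.Stop()

	conn, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		experimental.WithBufferPool(pool),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}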
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "maps" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" +) + +func init() { + internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting +} + +var logger = grpclog.Component("metrics-registry") + +// DefaultMetrics are the default metrics registered through global metrics +// registry. This is written to at initialization time only, and is read only +// after initialization. +var DefaultMetrics = NewMetrics() + +// MetricDescriptor is the data for a registered metric. +type MetricDescriptor struct { + // The name of this metric. This name must be unique across the whole binary + // (including any per call metrics). See + // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions + // for metric naming conventions. + Name Metric + // The description of this metric. + Description string + // The unit (e.g. entries, seconds) of this metric. + Unit string + // The required label keys for this metric. These are intended to + // metrics emitted from a stats handler. + Labels []string + // The optional label keys for this metric. These are intended to attached + // to metrics emitted from a stats handler if configured. + OptionalLabels []string + // Whether this metric is on by default. + Default bool + // The type of metric. This is set by the metric registry, and not intended + // to be set by a component registering a metric. + Type MetricType + // Bounds are the bounds of this metric. This only applies to histogram + // metrics. If unset or set with length 0, stats handlers will fall back to + // default bounds. + Bounds []float64 +} + +// MetricType is the type of metric. +type MetricType int + +// Type of metric supported by this instrument registry. +const ( + MetricTypeIntCount MetricType = iota + MetricTypeFloatCount + MetricTypeIntHisto + MetricTypeFloatHisto + MetricTypeIntGauge +) + +// Int64CountHandle is a typed handle for a int count metric. This handle +// is passed at the recording point in order to know which metric to record +// on. +type Int64CountHandle MetricDescriptor + +// Descriptor returns the int64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 count value on the metrics recorder provided. +func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Count(h, incr, labels...) +} + +// Float64CountHandle is a typed handle for a float count metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Float64CountHandle MetricDescriptor + +// Descriptor returns the float64 count handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64CountHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 count value on the metrics recorder provided. 
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Count(h, incr, labels...) +} + +// Int64HistoHandle is a typed handle for an int histogram metric. This handle +// is passed at the recording point in order to know which metric to record on. +type Int64HistoHandle MetricDescriptor + +// Descriptor returns the int64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Histo(h, incr, labels...) +} + +// Float64HistoHandle is a typed handle for a float histogram metric. This +// handle is passed at the recording point in order to know which metric to +// record on. +type Float64HistoHandle MetricDescriptor + +// Descriptor returns the float64 histo handle typecast to a pointer to a +// MetricDescriptor. +func (h *Float64HistoHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the float64 histo value on the metrics recorder provided. +func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) { + recorder.RecordFloat64Histo(h, incr, labels...) +} + +// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is +// passed at the recording point in order to know which metric to record on. +type Int64GaugeHandle MetricDescriptor + +// Descriptor returns the int64 gauge handle typecast to a pointer to a +// MetricDescriptor. +func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor { + return (*MetricDescriptor)(h) +} + +// Record records the int64 histo value on the metrics recorder provided. +func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) { + recorder.RecordInt64Gauge(h, incr, labels...) +} + +// registeredMetrics are the registered metric descriptor names. +var registeredMetrics = make(map[Metric]bool) + +// metricsRegistry contains all of the registered metrics. +// +// This is written to only at init time, and read only after that. +var metricsRegistry = make(map[Metric]*MetricDescriptor) + +// DescriptorForMetric returns the MetricDescriptor from the global registry. +// +// Returns nil if MetricDescriptor not present. +func DescriptorForMetric(metric Metric) *MetricDescriptor { + return metricsRegistry[metric] +} + +func registerMetric(name Metric, def bool) { + if registeredMetrics[name] { + logger.Fatalf("metric %v already registered", name) + } + registeredMetrics[name] = true + if def { + DefaultMetrics = DefaultMetrics.Add(name) + } +} + +// RegisterInt64Count registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64CountHandle)(descPtr) +} + +// RegisterFloat64Count registers the metric description onto the global +// registry. 
It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatCount + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64CountHandle)(descPtr) +} + +// RegisterInt64Histo registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64HistoHandle)(descPtr) +} + +// RegisterFloat64Histo registers the metric description onto the global +// registry. It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeFloatHisto + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Float64HistoHandle)(descPtr) +} + +// RegisterInt64Gauge registers the metric description onto the global registry. +// It returns a typed handle to use to recording data. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple metrics are +// registered with the same name, this function will panic. +func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle { + registerMetric(descriptor.Name, descriptor.Default) + descriptor.Type = MetricTypeIntGauge + descPtr := &descriptor + metricsRegistry[descriptor.Name] = descPtr + return (*Int64GaugeHandle)(descPtr) +} + +// snapshotMetricsRegistryForTesting snapshots the global data of the metrics +// registry. Returns a cleanup function that sets the metrics registry to its +// original state. +func snapshotMetricsRegistryForTesting() func() { + oldDefaultMetrics := DefaultMetrics + oldRegisteredMetrics := registeredMetrics + oldMetricsRegistry := metricsRegistry + + registeredMetrics = make(map[Metric]bool) + metricsRegistry = make(map[Metric]*MetricDescriptor) + maps.Copy(registeredMetrics, registeredMetrics) + maps.Copy(metricsRegistry, metricsRegistry) + + return func() { + DefaultMetrics = oldDefaultMetrics + registeredMetrics = oldRegisteredMetrics + metricsRegistry = oldMetricsRegistry + } +} diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go new file mode 100644 index 0000000000000..3221f7a633a37 --- /dev/null +++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go @@ -0,0 +1,114 @@ +/* + * + * Copyright 2024 gRPC authors. 
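The Register* helpers above must run at init time and return typed handles used at recording points. A sketch of registering a hypothetical counter and recording to it; the metric name, unit, and label are invented for illustration, and recorder stands for whatever MetricsRecorder a stats handler supplies.

package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// retriesMetric is a hypothetical per-target retry counter; the name and
// label are illustrative only and are not defined by gRPC. Package-level
// variable initialization runs at init time, as the registry requires.
var retriesMetric = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "example.client.retries",
	Description: "Number of retries performed by the example component.",
	Unit:        "retry",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordRetry records one retry on the given recorder, typically provided to
// a stats handler or LB policy by gRPC.
func recordRetry(recorder estats.MetricsRecorder, target string) {
	retriesMetric.Record(recorder, 1, target)
}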
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats contains experimental metrics/stats API's. +package stats + +import "maps" + +// MetricsRecorder records on metrics derived from metric registry. +type MetricsRecorder interface { + // RecordInt64Count records the measurement alongside labels on the int + // count associated with the provided handle. + RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string) + // RecordFloat64Count records the measurement alongside labels on the float + // count associated with the provided handle. + RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string) + // RecordInt64Histo records the measurement alongside labels on the int + // histo associated with the provided handle. + RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string) + // RecordFloat64Histo records the measurement alongside labels on the float + // histo associated with the provided handle. + RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string) + // RecordInt64Gauge records the measurement alongside labels on the int + // gauge associated with the provided handle. + RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string) +} + +// Metric is an identifier for a metric. +type Metric string + +// Metrics is a set of metrics to record. Once created, Metrics is immutable, +// however Add and Remove can make copies with specific metrics added or +// removed, respectively. +// +// Do not construct directly; use NewMetrics instead. +type Metrics struct { + // metrics are the set of metrics to initialize. + metrics map[Metric]bool +} + +// NewMetrics returns a Metrics containing Metrics. +func NewMetrics(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Metrics returns the metrics set. The returned map is read-only and must not +// be modified. +func (m *Metrics) Metrics() map[Metric]bool { + return m.metrics +} + +// Add adds the metrics to the metrics set and returns a new copy with the +// additional metrics. +func (m *Metrics) Add(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + newMetrics[metric] = true + } + return &Metrics{ + metrics: newMetrics, + } +} + +// Join joins the metrics passed in with the metrics set, and returns a new copy +// with the merged metrics. +func (m *Metrics) Join(metrics *Metrics) *Metrics { + newMetrics := make(map[Metric]bool) + maps.Copy(newMetrics, m.metrics) + maps.Copy(newMetrics, metrics.metrics) + return &Metrics{ + metrics: newMetrics, + } +} + +// Remove removes the metrics from the metrics set and returns a new copy with +// the metrics removed. 
+func (m *Metrics) Remove(metrics ...Metric) *Metrics { + newMetrics := make(map[Metric]bool) + for metric := range m.metrics { + newMetrics[metric] = true + } + + for _, metric := range metrics { + delete(newMetrics, metric) + } + return &Metrics{ + metrics: newMetrics, + } +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index ac73c9ced2553..f1ae080dcb816 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -20,8 +20,6 @@ package grpclog import ( "fmt" - - "google.golang.org/grpc/internal/grpclog" ) // componentData records the settings for a component. @@ -33,22 +31,22 @@ var cache = map[string]*componentData{} func (c *componentData) InfoDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.InfoDepth(depth+1, args...) + InfoDepth(depth+1, args...) } func (c *componentData) WarningDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.WarningDepth(depth+1, args...) + WarningDepth(depth+1, args...) } func (c *componentData) ErrorDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.ErrorDepth(depth+1, args...) + ErrorDepth(depth+1, args...) } func (c *componentData) FatalDepth(depth int, args ...any) { args = append([]any{"[" + string(c.name) + "]"}, args...) - grpclog.FatalDepth(depth+1, args...) + FatalDepth(depth+1, args...) } func (c *componentData) Info(args ...any) { diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 16928c9cb993c..db320105e64e2 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -18,18 +18,15 @@ // Package grpclog defines logging for grpc. // -// All logs in transport and grpclb packages only go to verbose level 2. -// All logs in other packages in grpc are logged in spite of the verbosity level. -// -// In the default logger, -// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, -// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. -package grpclog // import "google.golang.org/grpc/grpclog" +// In the default logger, severity level can be set by environment variable +// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by +// GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog import ( "os" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) func init() { @@ -38,58 +35,58 @@ func init() { // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return grpclog.Logger.V(l) + return internal.LoggerV2Impl.V(l) } // Info logs to the INFO log. func Info(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...any) { - grpclog.Logger.Warning(args...) + internal.LoggerV2Impl.Warning(args...) } // Warningf logs to the WARNING log. 
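Metrics values are immutable; Add, Join, and Remove above all return copies. A small sketch that derives a metric set from DefaultMetrics; both metric names are placeholders.

package example

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// enabledMetrics builds the set of metrics a hypothetical stats handler would
// export: everything on by default, plus one optional metric, minus one that
// is too noisy for this deployment. The names are illustrative only.
func enabledMetrics() *estats.Metrics {
	return estats.DefaultMetrics.
		Add("example.client.retries").
		Remove("example.noisy.metric")
}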
Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...any) { - grpclog.Logger.Warningf(format, args...) + internal.LoggerV2Impl.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...any) { - grpclog.Logger.Warningln(args...) + internal.LoggerV2Impl.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...any) { - grpclog.Logger.Error(args...) + internal.LoggerV2Impl.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. func Errorf(format string, args ...any) { - grpclog.Logger.Errorf(format, args...) + internal.LoggerV2Impl.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...any) { - grpclog.Logger.Errorln(args...) + internal.LoggerV2Impl.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...any) { - grpclog.Logger.Fatal(args...) + internal.LoggerV2Impl.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -97,15 +94,15 @@ func Fatal(args ...any) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...any) { - grpclog.Logger.Fatalf(format, args...) + internal.LoggerV2Impl.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. -// It calle os.Exit()) with exit code 1. +// It calls os.Exit() with exit code 1. func Fatalln(args ...any) { - grpclog.Logger.Fatalln(args...) + internal.LoggerV2Impl.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -114,19 +111,76 @@ func Fatalln(args ...any) { // // Deprecated: use Info. func Print(args ...any) { - grpclog.Logger.Info(args...) + internal.LoggerV2Impl.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...any) { - grpclog.Logger.Infof(format, args...) + internal.LoggerV2Impl.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...any) { - grpclog.Logger.Infoln(args...) + internal.LoggerV2Impl.Infoln(args...) +} + +// InfoDepth logs to the INFO log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InfoDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.InfoDepth(depth, args...) + } else { + internal.LoggerV2Impl.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WarningDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.WarningDepth(depth, args...) + } else { + internal.LoggerV2Impl.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
+func ErrorDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.ErrorDepth(depth, args...) + } else { + internal.LoggerV2Impl.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FatalDepth(depth int, args ...any) { + if internal.DepthLoggerV2Impl != nil { + internal.DepthLoggerV2Impl.FatalDepth(depth, args...) + } else { + internal.LoggerV2Impl.Fatalln(args...) + } + os.Exit(1) } diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go new file mode 100644 index 0000000000000..59c03bc14c2a9 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go @@ -0,0 +1,26 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the grpclog package. +package internal + +// LoggerV2Impl is the logger used for the non-depth log functions. +var LoggerV2Impl LoggerV2 + +// DepthLoggerV2Impl is the logger used for the depth log functions. +var DepthLoggerV2Impl DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go new file mode 100644 index 0000000000000..e524fdd40b236 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -0,0 +1,87 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) +} + +// LoggerWrapper wraps Logger into a LoggerV2. +type LoggerWrapper struct { + Logger +} + +// Info logs to INFO log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Info(args ...any) { + l.Logger.Print(args...) +} + +// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Infoln(args ...any) { + l.Logger.Println(args...) +} + +// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. 
+func (l *LoggerWrapper) Infof(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Warning(args ...any) { + l.Logger.Print(args...) +} + +// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Warningln(args ...any) { + l.Logger.Println(args...) +} + +// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Warningf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. +func (l *LoggerWrapper) Error(args ...any) { + l.Logger.Print(args...) +} + +// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. +func (l *LoggerWrapper) Errorln(args ...any) { + l.Logger.Println(args...) +} + +// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. +func (l *LoggerWrapper) Errorf(format string, args ...any) { + l.Logger.Printf(format, args...) +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (*LoggerWrapper) V(int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go similarity index 52% rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go index bfc45102ab245..07df71e98a87a 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go @@ -1,6 +1,6 @@ /* * - * Copyright 2020 gRPC authors. + * Copyright 2024 gRPC authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,59 +16,17 @@ * */ -// Package grpclog (internal) defines depth logging for grpc. -package grpclog +package internal import ( + "encoding/json" + "fmt" + "io" + "log" "os" ) -// Logger is the logger used for the non-depth log functions. -var Logger LoggerV2 - -// DepthLogger is the logger used for the depth log functions. -var DepthLogger DepthLoggerV2 - -// InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.InfoDepth(depth, args...) - } else { - Logger.Infoln(args...) - } -} - -// WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.WarningDepth(depth, args...) - } else { - Logger.Warningln(args...) - } -} - -// ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.ErrorDepth(depth, args...) - } else { - Logger.Errorln(args...) - } -} - -// FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...any) { - if DepthLogger != nil { - DepthLogger.FatalDepth(depth, args...) - } else { - Logger.Fatalln(args...) - } - os.Exit(1) -} - // LoggerV2 does underlying logging work for grpclog. -// This is a copy of the LoggerV2 defined in the external grpclog package. It -// is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. 
Info(args ...any) @@ -107,14 +65,13 @@ type LoggerV2 interface { // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. -// This is a copy of the DepthLoggerV2 defined in the external grpclog package. -// It is defined here to avoid a circular dependency. // // # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { + LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. @@ -124,3 +81,124 @@ type DepthLoggerV2 interface { // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...any) } + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int + jsonFormat bool +} + +func (g *loggerT) output(severity int, s string) { + sevStr := severityName[severity] + if !g.jsonFormat { + g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) + return + } + // TODO: we can also include the logging component, but that needs more + // (API) changes. + b, _ := json.Marshal(map[string]string{ + "severity": sevStr, + "message": s, + }) + g.m[severity].Output(2, string(b)) +} + +func (g *loggerT) Info(args ...any) { + g.output(infoLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Infoln(args ...any) { + g.output(infoLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Infof(format string, args ...any) { + g.output(infoLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Warning(args ...any) { + g.output(warningLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Warningln(args ...any) { + g.output(warningLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Warningf(format string, args ...any) { + g.output(warningLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Error(args ...any) { + g.output(errorLog, fmt.Sprint(args...)) +} + +func (g *loggerT) Errorln(args ...any) { + g.output(errorLog, fmt.Sprintln(args...)) +} + +func (g *loggerT) Errorf(format string, args ...any) { + g.output(errorLog, fmt.Sprintf(format, args...)) +} + +func (g *loggerT) Fatal(args ...any) { + g.output(fatalLog, fmt.Sprint(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalln(args ...any) { + g.output(fatalLog, fmt.Sprintln(args...)) + os.Exit(1) +} + +func (g *loggerT) Fatalf(format string, args ...any) { + g.output(fatalLog, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} + +// LoggerV2Config configures the LoggerV2 implementation. +type LoggerV2Config struct { + // Verbosity sets the verbosity level of the logger. + Verbosity int + // FormatJSON controls whether the logger should output logs in JSON format. 
+ FormatJSON bool +} + +// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration. +// The infoW, warningW, and errorW writers are used to write log messages of +// different severity levels. +func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 { + var m []*log.Logger + flag := log.LstdFlags + if c.FormatJSON { + flag = 0 + } + m = append(m, log.New(infoW, "", flag)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, "", flag)) + m = append(m, log.New(ew, "", flag)) + return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON} +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index b1674d8267ca4..4b203585707af 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,70 +18,17 @@ package grpclog -import "google.golang.org/grpc/internal/grpclog" +import "google.golang.org/grpc/grpclog/internal" // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. -type Logger interface { - Fatal(args ...any) - Fatalf(format string, args ...any) - Fatalln(args ...any) - Print(args ...any) - Printf(format string, args ...any) - Println(args ...any) -} +type Logger internal.Logger // SetLogger sets the logger that is used in grpc. Call only from // init() functions. // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - grpclog.Logger = &loggerWrapper{Logger: l} -} - -// loggerWrapper wraps Logger into a LoggerV2. -type loggerWrapper struct { - Logger -} - -func (g *loggerWrapper) Info(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Infoln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Infof(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Warning(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Warningln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Warningf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) Error(args ...any) { - g.Logger.Print(args...) -} - -func (g *loggerWrapper) Errorln(args ...any) { - g.Logger.Println(args...) -} - -func (g *loggerWrapper) Errorf(format string, args ...any) { - g.Logger.Printf(format, args...) -} - -func (g *loggerWrapper) V(l int) bool { - // Returns true for all verbose level. - return true + internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l} } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index ecfd36d713032..892dc13d164b9 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -19,52 +19,16 @@ package grpclog import ( - "encoding/json" - "fmt" "io" - "log" "os" "strconv" "strings" - "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/grpclog/internal" ) // LoggerV2 does underlying logging work for grpclog. -type LoggerV2 interface { - // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...any) - // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...any) - // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. 
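SetLogger above now wraps the deprecated Logger interface with internal.LoggerWrapper. Since the standard library's *log.Logger happens to satisfy that interface, a minimal sketch of the deprecated path looks like this; new code should prefer SetLoggerV2.

package example

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// *log.Logger provides Fatal/Fatalf/Fatalln/Print/Printf/Println, which is
	// exactly the deprecated grpclog.Logger surface.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}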
- Infof(format string, args ...any) - // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...any) - // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...any) - // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...any) - // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...any) - // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...any) - // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...any) - // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...any) - // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...any) - // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - // gRPC ensures that all Fatal logs will exit with os.Exit(1). - // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...any) - // V reports whether verbosity level l is at least the requested verbose level. - V(l int) bool -} +type LoggerV2 internal.LoggerV2 // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. @@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) { if _, ok := l.(*componentData); ok { panic("cannot use component logger as grpclog logger") } - grpclog.Logger = l - grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) -} - -const ( - // infoLog indicates Info severity. - infoLog int = iota - // warningLog indicates Warning severity. - warningLog - // errorLog indicates Error severity. - errorLog - // fatalLog indicates Fatal severity. - fatalLog -) - -// severityName contains the string representation of each severity. -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// loggerT is the default logger used by grpclog. -type loggerT struct { - m []*log.Logger - v int - jsonFormat bool + internal.LoggerV2Impl = l + internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2) } // NewLoggerV2 creates a loggerV2 with the provided writers. @@ -108,32 +46,13 @@ type loggerT struct { // Warning logs will be written to warningW and infoW. // Info logs will be written to infoW. func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{}) + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{}) } // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and // verbosity level. 
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v}) -} - -type loggerV2Config struct { - verbose int - jsonFormat bool -} - -func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 { - var m []*log.Logger - flag := log.LstdFlags - if c.jsonFormat { - flag = 0 - } - m = append(m, log.New(infoW, "", flag)) - m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag)) - ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. - m = append(m, log.New(ew, "", flag)) - m = append(m, log.New(ew, "", flag)) - return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat} + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v}) } // newLoggerV2 creates a loggerV2 to be used as default logger. @@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 { jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json") - return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{ - verbose: v, - jsonFormat: jsonFormat, - }) -} - -func (g *loggerT) output(severity int, s string) { - sevStr := severityName[severity] - if !g.jsonFormat { - g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s)) - return - } - // TODO: we can also include the logging component, but that needs more - // (API) changes. - b, _ := json.Marshal(map[string]string{ - "severity": sevStr, - "message": s, + return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{ + Verbosity: v, + FormatJSON: jsonFormat, }) - g.m[severity].Output(2, string(b)) -} - -func (g *loggerT) Info(args ...any) { - g.output(infoLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Infoln(args ...any) { - g.output(infoLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Infof(format string, args ...any) { - g.output(infoLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Warning(args ...any) { - g.output(warningLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Warningln(args ...any) { - g.output(warningLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Warningf(format string, args ...any) { - g.output(warningLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Error(args ...any) { - g.output(errorLog, fmt.Sprint(args...)) -} - -func (g *loggerT) Errorln(args ...any) { - g.output(errorLog, fmt.Sprintln(args...)) -} - -func (g *loggerT) Errorf(format string, args ...any) { - g.output(errorLog, fmt.Sprintf(format, args...)) -} - -func (g *loggerT) Fatal(args ...any) { - g.output(fatalLog, fmt.Sprint(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalln(args ...any) { - g.output(fatalLog, fmt.Sprintln(args...)) - os.Exit(1) -} - -func (g *loggerT) Fatalf(format string, args ...any) { - g.output(fatalLog, fmt.Sprintf(format, args...)) - os.Exit(1) -} - -func (g *loggerT) V(l int) bool { - return l <= g.v } // DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements @@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool { // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. -type DepthLoggerV2 interface { - LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...any) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. 
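NewLoggerV2 and NewLoggerV2WithVerbosity above now delegate to the internal logger implementation. A sketch of installing a more verbose logger programmatically; the writer choices and verbosity level are arbitrary.

package example

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Per the writer layering above, warnings and errors are also written to
	// the info writer; verbosity 2 makes grpclog.V(2)-guarded logs visible.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))
}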
- WarningDepth(depth int, args ...any) - // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...any) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...any) -} +type DepthLoggerV2 internal.DepthLoggerV2 diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 38b8835073502..d92335445f650 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ +var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse @@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckRequest); i { case 0: return &v.state @@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() { return nil } } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*HealthCheckResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index 51b736ba06e5f..f96b8ab4927e1 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -17,8 +17,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/health/v1/health.proto package grpc_health_v1 @@ -32,8 +32,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" @@ -73,7 +73,7 @@ type HealthClient interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. 
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) + Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) } type healthClient struct { @@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { +func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &healthWatchClient{ClientStream: stream} + x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts . return x, nil } -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse] // HealthServer is the server API for Health service. // All implementations should embed UnimplementedHealthServer -// for forward compatibility +// for forward compatibility. // // Health is gRPC's mechanism for checking whether a server is able to handle // RPCs. Its semantics are documented in @@ -160,19 +146,23 @@ type HealthServer interface { // should assume this method is not supported and should not retry the // call. If the call terminates with any other status (including OK), // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error + Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error } -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} +// UnimplementedHealthServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { +func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } +func (UnimplementedHealthServer) testEmbeddedByValue() {} // UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. 
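Health_WatchClient is now an alias for grpc.ServerStreamingClient[HealthCheckResponse], so existing call sites keep compiling. A sketch of checking and watching a service from the client side; conn is assumed to be an already-established *grpc.ClientConn, and the logging is illustrative.

package example

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// watchHealth checks and then streams the health of the given service name
// (an empty string refers to the server as a whole).
func watchHealth(ctx context.Context, conn *grpc.ClientConn, service string) error {
	client := healthpb.NewHealthClient(conn)

	// One-shot check.
	resp, err := client.Check(ctx, &healthpb.HealthCheckRequest{Service: service})
	if err != nil {
		return err
	}
	log.Printf("current status: %v", resp.GetStatus())

	// Streaming watch: the server sends the current status first, then a new
	// message whenever it changes; Recv blocks in between.
	stream, err := client.Watch(ctx, &healthpb.HealthCheckRequest{Service: service})
	if err != nil {
		return err
	}
	for {
		update, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		log.Printf("status changed: %v", update.GetStatus())
	}
}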
// Use of this interface is not recommended, as added methods to HealthServer will @@ -182,6 +172,13 @@ type UnsafeHealthServer interface { } func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { + // If the following call panics, it indicates UnimplementedHealthServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&Health_ServiceDesc, srv) } @@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { if err := stream.RecvMsg(m); err != nil { return err } - return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream + return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream}) } -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse] // Health_ServiceDesc is the grpc.ServiceDesc for Health service. // It's only intended for direct use with grpc.RegisterService, diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go index cce6312d77f9c..d4b4b7081590e 100644 --- a/vendor/google.golang.org/grpc/health/server.go +++ b/vendor/google.golang.org/grpc/health/server.go @@ -51,7 +51,7 @@ func NewServer() *Server { } // Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { +func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { s.mu.RLock() defer s.mu.RUnlock() if servingStatus, ok := s.statusMap[in.Service]; ok { diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index 5496b99dd5c45..31c9cdc9d026b 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -99,7 +99,7 @@ func (sbc *subBalancerWrapper) startBalancer() { if sbc.balancer == nil { sbc.balancer = gracefulswitch.NewBalancer(sbc, sbc.buildOpts) } - sbc.group.logger.Infof("Creating child policy of type %q for locality %q", sbc.builder.Name(), sbc.id) + sbc.group.logger.Infof("Creating child policy of type %q for child %q", sbc.builder.Name(), sbc.id) sbc.balancer.SwitchTo(sbc.builder) if sbc.ccState != nil { sbc.balancer.UpdateClientConnState(*sbc.ccState) @@ -121,14 +121,11 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) sbc.ccState = &s b := sbc.balancer if b == nil { - // This sub-balancer was closed. This should never happen because - // sub-balancers are closed when the locality is removed from EDS, or - // the balancer group is closed. There should be no further address - // updates when either of this happened. 
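The regenerated registration helper above probes for testEmbeddedByValue, so server implementations are expected to embed UnimplementedHealthServer by value rather than as a (possibly nil) pointer. A minimal server-side sketch, assuming the generated healthpb package; the healthService type is illustrative only:

package healthexample

import (
	"context"

	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// healthService embeds UnimplementedHealthServer by value. Embedding a nil
// *UnimplementedHealthServer pointer instead would trip the panic check that
// RegisterHealthServer now performs at registration time.
type healthService struct {
	healthpb.UnimplementedHealthServer
}

// Check reports SERVING unconditionally; Watch falls back to the embedded
// Unimplemented implementation.
func (s *healthService) Check(_ context.Context, _ *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}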
- // - // This will be a common case with priority support, because a - // sub-balancer (and the whole balancer group) could be closed because - // it's the lower priority, but it can still get address updates. + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. return nil } return b.UpdateClientConnState(s) @@ -137,14 +134,11 @@ func (sbc *subBalancerWrapper) updateClientConnState(s balancer.ClientConnState) func (sbc *subBalancerWrapper) resolverError(err error) { b := sbc.balancer if b == nil { - // This sub-balancer was closed. This should never happen because - // sub-balancers are closed when the locality is removed from EDS, or - // the balancer group is closed. There should be no further address - // updates when either of this happened. - // - // This will be a common case with priority support, because a - // sub-balancer (and the whole balancer group) could be closed because - // it's the lower priority, but it can still get address updates. + // A sub-balancer is closed when it is removed from the group or the + // group is closed as a whole, and is not expected to receive updates + // after that. But when used with the priority LB policy a sub-balancer + // (and the whole balancer group) could be closed because it's the lower + // priority, but it can still get address updates. return } b.ResolverError(err) @@ -210,7 +204,7 @@ type BalancerGroup struct { // after it's closed. // // We don't share the mutex to avoid deadlocks (e.g. a call to sub-balancer - // may call back to balancer group inline. It causes deaclock if they + // may call back to balancer group inline. It causes deadlock if they // require the same mutex). // // We should never need to hold multiple locks at the same time in this @@ -224,7 +218,7 @@ type BalancerGroup struct { // guards the map from SubConn to balancer ID, so updateSubConnState needs // to hold it shortly to potentially delete from the map. // - // UpdateState is called by the balancer state aggretator, and it will + // UpdateState is called by the balancer state aggregator, and it will // decide when and whether to call. // // The corresponding boolean incomingStarted is used to stop further updates @@ -298,11 +292,11 @@ func (bg *BalancerGroup) Start() { // AddWithClientConn adds a balancer with the given id to the group. The // balancer is built with a balancer builder registered with balancerName. The // given ClientConn is passed to the newly built balancer instead of the -// onepassed to balancergroup.New(). +// one passed to balancergroup.New(). // // TODO: Get rid of the existing Add() API and replace it with this. func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer.ClientConn) error { - bg.logger.Infof("Adding child policy of type %q for locality %q", balancerName, id) + bg.logger.Infof("Adding child policy of type %q for child %q", balancerName, id) builder := balancer.Get(balancerName) if builder == nil { return fmt.Errorf("unregistered balancer name %q", balancerName) @@ -318,7 +312,7 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer. 
if bg.outgoingStarted && bg.deletedBalancerCache != nil { if old, ok := bg.deletedBalancerCache.Remove(id); ok { if bg.logger.V(2) { - bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id) + bg.logger.Infof("Removing and reusing child policy of type %q for child %q from the balancer cache", balancerName, id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } @@ -372,13 +366,13 @@ func (bg *BalancerGroup) Add(id string, builder balancer.Builder) { // closed after timeout. Cleanup work (closing sub-balancer and removing // subconns) will be done after timeout. func (bg *BalancerGroup) Remove(id string) { - bg.logger.Infof("Removing child policy for locality %q", id) + bg.logger.Infof("Removing child policy for child %q", id) bg.outgoingMu.Lock() sbToRemove, ok := bg.idToBalancerConfig[id] if !ok { - bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id) + bg.logger.Errorf("Child policy for child %q does not exist in the balancer group", id) bg.outgoingMu.Unlock() return } @@ -394,13 +388,13 @@ func (bg *BalancerGroup) Remove(id string) { if bg.deletedBalancerCache != nil { if bg.logger.V(2) { - bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id) + bg.logger.Infof("Adding child policy for child %q to the balancer cache", id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } bg.deletedBalancerCache.Add(id, sbToRemove, func() { if bg.logger.V(2) { - bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id) + bg.logger.Infof("Removing child policy for child %q from the balancer cache after timeout", id) bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) } @@ -541,7 +535,7 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver // aggregator will create an aggregated picker and an aggregated connectivity // state, then forward to ClientConn. func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) { - bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state) + bg.logger.Infof("Balancer state update from child %v, new state: %+v", id, state) // Send new state to the aggregator, without holding the incomingMu. // incomingMu is to protect all calls to the parent ClientConn, this update diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index aa4505a871dfb..9669328914ad4 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry } // Log creates a proto binary log entry, and logs it to the sink. 
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go index dfe18b08925d9..64c791953d017 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -46,7 +46,7 @@ type entry interface { // channelMap is the storage data structure for channelz. // -// Methods of channelMap can be divided in two two categories with respect to +// Methods of channelMap can be divided into two categories with respect to // locking. // // 1. Methods acquire the global lock. @@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string { return n } -func min(a, b int) int { - if a < b { - return a - } - return b -} - func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { if maxResults <= 0 { maxResults = EntriesPerPage diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 03e24e1507aa6..078bb81238bc4 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -33,7 +33,7 @@ var ( // outside this package except by tests. IDGen IDGenerator - db *channelMap = newChannelMap() + db = newChannelMap() // EntriesPerPage defines the number of channelz entries to be shown on a web page. EntriesPerPage = 50 curState int32 diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index d1ed8df6a5186..0e6e18e185c7a 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -35,13 +35,13 @@ type SocketOptionData struct { // Getsockopt defines the function to get socket options requested by channelz. // It is to be passed to syscall.RawConn.Control(). // Windows OS doesn't support Socket Option -func (s *SocketOptionData) Getsockopt(fd uintptr) { +func (s *SocketOptionData) Getsockopt(uintptr) { once.Do(func() { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { +func GetSocketOption(any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index d906487139445..452985f8d8f1b 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -45,7 +45,11 @@ var ( // option is present for backward compatibility. This option may be overridden // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true" // or "false". - EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false) + EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true) + // XDSFallbackSupport is the env variable that controls whether support for + // xDS fallback is turned on. If this is unset or is false, only the first + // xDS server in the list of server configs will be used. 
+ XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go index 7f7044e1731c8..7617be2158957 100644 --- a/vendor/google.golang.org/grpc/internal/experimental.go +++ b/vendor/google.golang.org/grpc/internal/experimental.go @@ -18,11 +18,11 @@ package internal var ( - // WithRecvBufferPool is implemented by the grpc package and returns a dial + // WithBufferPool is implemented by the grpc package and returns a dial // option to configure a shared buffer pool for a grpc.ClientConn. - WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption - // RecvBufferPool is implemented by the grpc package and returns a server + // BufferPool is implemented by the grpc package and returns a server // option to configure a shared buffer pool for a grpc.Server. - RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption + BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption ) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go index 6717b757f80dc..43423d8ad9ab1 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/googlecloud.go @@ -62,9 +62,9 @@ func isRunningOnGCE(manufacturer []byte, goos string) bool { name = strings.TrimSpace(name) return name == "Google" || name == "Google Compute Engine" case "windows": - name = strings.Replace(name, " ", "", -1) - name = strings.Replace(name, "\n", "", -1) - name = strings.Replace(name, "\r", "", -1) + name = strings.ReplaceAll(name, " ", "") + name = strings.ReplaceAll(name, "\n", "") + name = strings.ReplaceAll(name, "\r", "") return name == "Google" default: return false diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go similarity index 63% rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go index faa998de7632b..092ad187a2c8d 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go @@ -16,17 +16,21 @@ * */ +// Package grpclog provides logging functionality for internal gRPC packages, +// outside of the functionality provided by the external `grpclog` package. package grpclog import ( "fmt" + + "google.golang.org/grpc/grpclog" ) // PrefixLogger does logging with a prefix. // // Logging method on a nil logs without any prefix. type PrefixLogger struct { - logger DepthLoggerV2 + logger grpclog.DepthLoggerV2 prefix string } @@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) { pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) return } - InfoDepth(1, fmt.Sprintf(format, args...)) + grpclog.InfoDepth(1, fmt.Sprintf(format, args...)) } // Warningf does warning logging. @@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) { pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) return } - WarningDepth(1, fmt.Sprintf(format, args...)) + grpclog.WarningDepth(1, fmt.Sprintf(format, args...)) } // Errorf does error logging. 
@@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) { pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) return } - ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -// Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...any) { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - if !Logger.V(2) { - return - } - if pl != nil { - // Handle nil, so the tests can pass in a nil logger. - format = pl.prefix + format - pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) - return - } - InfoDepth(1, fmt.Sprintf(format, args...)) - + grpclog.ErrorDepth(1, fmt.Sprintf(format, args...)) } // V reports whether verbosity level l is at least the requested verbose level. func (pl *PrefixLogger) V(l int) bool { - // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe - // rewrite PrefixLogger a little to ensure that we don't use the global - // `Logger` here, and instead use the `logger` field. - return Logger.V(l) + if pl != nil { + return pl.logger.V(l) + } + return true } // NewPrefixLogger creates a prefix logger with the given prefix. -func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { +func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger { return &PrefixLogger{logger: logger, prefix: prefix} } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index f7f40a16acee5..19b9d639275a8 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { return cs } -// Schedule adds a callback to be scheduled after existing callbacks are run. +// TrySchedule tries to schedules the provided callback function f to be +// executed in the order it was added. This is a best-effort operation. If the +// context passed to NewCallbackSerializer was canceled before this method is +// called, the callback will not be scheduled. // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. +func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) { + cs.callbacks.Put(f) +} + +// ScheduleOr schedules the provided callback function f to be executed in the +// order it was added. If the context passed to NewCallbackSerializer has been +// canceled before this method is called, the onFailure callback will be +// executed inline instead. // -// Return value indicates if the callback was successfully added to the list of -// callbacks to be executed by the serializer. It is not possible to add -// callbacks once the context passed to NewCallbackSerializer is cancelled. -func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - return cs.callbacks.Put(f) == nil +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. 
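The bool-returning Schedule is split here into TrySchedule (best effort: the callback is silently dropped once the serializer's context is canceled) and ScheduleOr (an onFailure callback runs inline instead). A short sketch of the call pattern under those signatures; grpcsync is internal to gRPC, so this compiles only inside the google.golang.org/grpc module and the logged messages are illustrative:

package grpcsyncexample

import (
	"context"
	"log"

	"google.golang.org/grpc/internal/grpcsync"
)

func schedule(ctx context.Context) {
	cs := grpcsync.NewCallbackSerializer(ctx)

	// Best effort: dropped without notice if ctx was already canceled.
	cs.TrySchedule(func(ctx context.Context) {
		log.Println("running serialized callback")
	})

	// ScheduleOr reports failure by running the second callback inline.
	cs.ScheduleOr(func(ctx context.Context) {
		log.Println("running serialized callback")
	}, func() {
		log.Println("serializer already closed; callback not scheduled")
	})
}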
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) { + if cs.callbacks.Put(f) != nil { + onFailure() + } } func (cs *CallbackSerializer) run(ctx context.Context) { diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index aef8cec1ab0cd..6d8c2f518dff7 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { if ps.msg != nil { msg := ps.msg - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[sub] { @@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) { ps.msg = msg for sub := range ps.subscribers { s := sub - ps.cs.Schedule(func(context.Context) { + ps.cs.TrySchedule(func(context.Context) { ps.mu.Lock() defer ps.mu.Unlock() if !ps.subscribers[s] { diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 5d66539869232..7aae9240ffc07 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -183,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -203,11 +203,31 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false + UserSetDefaultScheme = false // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n // is the number of elements. swap swaps the elements with indexes i and j. ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + + // ConnectedAddress returns the connected address for a SubConnState. The + // address is only valid if the state is READY. + ConnectedAddress any // func (scs SubConnState) resolver.Address + + // SetConnectedAddress sets the connected address for a SubConnState. + SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address) + + // SnapshotMetricRegistryForTesting snapshots the global data of the metric + // registry. Returns a cleanup function that sets the metric registry to its + // original state. Only called in testing functions. + SnapshotMetricRegistryForTesting func() func() + + // SetDefaultBufferPoolForTesting updates the default buffer pool, for + // testing purposes. + SetDefaultBufferPoolForTesting any // func(mem.BufferPool) + + // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for + // testing purposes. + SetBufferPoolingThresholdForTesting any // func(int) ) // HealthChecker defines the signature of the client-side LB channel health diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 3244718625cbc..703091047b4b8 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 @@ -313,7 +313,7 @@ func file_grpc_lookup_v1_rls_proto_rawDescGZIP() []byte { var file_grpc_lookup_v1_rls_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_grpc_lookup_v1_rls_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_grpc_lookup_v1_rls_proto_goTypes = []interface{}{ +var file_grpc_lookup_v1_rls_proto_goTypes = []any{ (RouteLookupRequest_Reason)(0), // 0: grpc.lookup.v1.RouteLookupRequest.Reason (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse @@ -340,7 +340,7 @@ func file_grpc_lookup_v1_rls_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupRequest); i { case 0: return &v.state @@ -352,7 +352,7 @@ func file_grpc_lookup_v1_rls_proto_init() { return nil } } - file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupResponse); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go index c42cb8cba0c2c..a0be3c8cb268e 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_config.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.1 -// protoc v4.25.2 +// protoc-gen-go v1.34.2 +// protoc v5.27.1 // source: grpc/lookup/v1/rls_config.proto package grpc_lookup_v1 @@ -271,6 +271,8 @@ type HttpKeyBuilder struct { // for example if you are suppressing a lot of information from the URL, but // need to separately cache and request URLs with that content. ConstantKeys map[string]string `protobuf:"bytes,5,rep,name=constant_keys,json=constantKeys,proto3" json:"constant_keys,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If specified, the HTTP method/verb will be extracted under this key name. 
+ Method string `protobuf:"bytes,6,opt,name=method,proto3" json:"method,omitempty"` } func (x *HttpKeyBuilder) Reset() { @@ -340,6 +342,13 @@ func (x *HttpKeyBuilder) GetConstantKeys() map[string]string { return nil } +func (x *HttpKeyBuilder) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + type RouteLookupConfig struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -707,7 +716,7 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0xf1, 0x02, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, + 0x38, 0x01, 0x22, 0x89, 0x03, 0x0a, 0x0e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, @@ -726,58 +735,60 @@ var file_grpc_lookup_v1_rls_config_proto_rawDesc = []byte{ 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, - 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, - 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, - 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, - 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, - 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, - 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, - 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x74, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a, 0x3f, 0x0a, + 0x11, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa6, + 0x04, 0x0a, 0x11, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x49, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6b, 0x65, 0x79, + 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, + 0x48, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, + 0x68, 0x74, 0x74, 0x70, 0x4b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, + 0x49, 0x0a, 0x10, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x52, 0x0f, 0x67, 0x72, 0x70, 0x63, 0x4b, + 0x65, 0x79, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x4f, 0x0a, 0x16, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, - 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 
0x6c, 0x74, - 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, - 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, - 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, - 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, - 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, - 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, - 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, + 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x41, 0x67, 0x65, 0x12, 0x28, + 0x0a, 0x10, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x52, 0x1b, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x22, 0x70, 0x0a, 0x1b, 0x52, 0x6f, 0x75, 0x74, 0x65, + 0x4c, 0x6f, 0x6f, 0x6b, 
0x75, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, 0x13, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, + 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, + 0x6b, 0x75, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x53, 0x0a, 0x11, 0x69, 0x6f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x0e, + 0x52, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, + 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -793,7 +804,7 @@ func file_grpc_lookup_v1_rls_config_proto_rawDescGZIP() []byte { } var file_grpc_lookup_v1_rls_config_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_grpc_lookup_v1_rls_config_proto_goTypes = []interface{}{ +var file_grpc_lookup_v1_rls_config_proto_goTypes = []any{ (*NameMatcher)(nil), // 0: grpc.lookup.v1.NameMatcher (*GrpcKeyBuilder)(nil), // 1: grpc.lookup.v1.GrpcKeyBuilder (*HttpKeyBuilder)(nil), // 2: grpc.lookup.v1.HttpKeyBuilder @@ -832,7 +843,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return } if !protoimpl.UnsafeEnabled { - file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[0].Exporter = func(v any, i int) any { switch v := v.(*NameMatcher); i { case 0: return &v.state @@ -844,7 +855,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[1].Exporter = func(v any, i int) any { switch v := v.(*GrpcKeyBuilder); i { case 0: return &v.state @@ -856,7 +867,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[2].Exporter = func(v any, i int) any { switch v := v.(*HttpKeyBuilder); i { case 0: return &v.state @@ -868,7 +879,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[3].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupConfig); i { case 0: return &v.state @@ -880,7 +891,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[4].Exporter = func(v any, i int) any { switch v := v.(*RouteLookupClusterSpecifier); i { case 0: return &v.state @@ -892,7 +903,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v 
interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[5].Exporter = func(v any, i int) any { switch v := v.(*GrpcKeyBuilder_Name); i { case 0: return &v.state @@ -904,7 +915,7 @@ func file_grpc_lookup_v1_rls_config_proto_init() { return nil } } - file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + file_grpc_lookup_v1_rls_config_proto_msgTypes[6].Exporter = func(v any, i int) any { switch v := v.(*GrpcKeyBuilder_ExtraKeys); i { case 0: return &v.state diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go index 5c7a25efd8409..23dcb2100c3dd 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls_grpc.pb.go @@ -14,8 +14,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 -// - protoc v4.25.2 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.27.1 // source: grpc/lookup/v1/rls.proto package grpc_lookup_v1 @@ -64,21 +64,25 @@ func (c *routeLookupServiceClient) RouteLookup(ctx context.Context, in *RouteLoo // RouteLookupServiceServer is the server API for RouteLookupService service. // All implementations must embed UnimplementedRouteLookupServiceServer -// for forward compatibility +// for forward compatibility. type RouteLookupServiceServer interface { // Lookup returns a target for a single key. RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) mustEmbedUnimplementedRouteLookupServiceServer() } -// UnimplementedRouteLookupServiceServer must be embedded to have forward compatible implementations. -type UnimplementedRouteLookupServiceServer struct { -} +// UnimplementedRouteLookupServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRouteLookupServiceServer struct{} func (UnimplementedRouteLookupServiceServer) RouteLookup(context.Context, *RouteLookupRequest) (*RouteLookupResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RouteLookup not implemented") } func (UnimplementedRouteLookupServiceServer) mustEmbedUnimplementedRouteLookupServiceServer() {} +func (UnimplementedRouteLookupServiceServer) testEmbeddedByValue() {} // UnsafeRouteLookupServiceServer may be embedded to opt out of forward compatibility for this service. // Use of this interface is not recommended, as added methods to RouteLookupServiceServer will @@ -88,6 +92,13 @@ type UnsafeRouteLookupServiceServer interface { } func RegisterRouteLookupServiceServer(s grpc.ServiceRegistrar, srv RouteLookupServiceServer) { + // If the following call panics, it indicates UnimplementedRouteLookupServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&RouteLookupService_ServiceDesc, srv) } diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index afac56572ad55..b901c7bace506 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -55,7 +55,7 @@ func (r *passthroughResolver) start() { r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } -func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {} func (*passthroughResolver) Close() {} diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go new file mode 100644 index 0000000000000..be110d41f9a42 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go @@ -0,0 +1,95 @@ +/* + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package stats + +import ( + "fmt" + + estats "google.golang.org/grpc/experimental/stats" + "google.golang.org/grpc/stats" +) + +// MetricsRecorderList forwards Record calls to all of its metricsRecorders. +// +// It eats any record calls where the label values provided do not match the +// number of label keys. +type MetricsRecorderList struct { + // metricsRecorders are the metrics recorders this list will forward to. + metricsRecorders []estats.MetricsRecorder +} + +// NewMetricsRecorderList creates a new metric recorder list with all the stats +// handlers provided which implement the MetricsRecorder interface. +// If no stats handlers provided implement the MetricsRecorder interface, +// the MetricsRecorder list returned is a no-op. +func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList { + var mrs []estats.MetricsRecorder + for _, sh := range shs { + if mr, ok := sh.(estats.MetricsRecorder); ok { + mrs = append(mrs, mr) + } + } + return &MetricsRecorderList{ + metricsRecorders: mrs, + } +} + +func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) { + if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want { + panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want)) + } +} + +func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) 
+ + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Count(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordFloat64Histo(handle, incr, labels...) + } +} + +func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) { + verifyLabels(handle.Descriptor(), labels...) + + for _, metricRecorder := range l.metricsRecorders { + metricRecorder.RecordInt64Gauge(handle, incr, labels...) + } +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc82059525..757925381fe75 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,11 +138,11 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go index 999f52cd75bdb..54c24c2ff3865 100644 --- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -58,20 +58,20 @@ func GetRusage() *Rusage { // CPUTimeDiff returns the differences of user CPU time and system CPU time used // between two Rusage structs. It a no-op function for non-linux environments. -func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { +func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) { log() return 0, 0 } // SetTCPUserTimeout is a no-op function under non-linux environments. -func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { +func SetTCPUserTimeout(net.Conn, time.Duration) error { log() return nil } // GetTCPUserTimeout is a no-op function under non-linux environments. // A negative return value indicates the operation is not supported -func GetTCPUserTimeout(conn net.Conn) (int, error) { +func GetTCPUserTimeout(net.Conn) (int, error) { log() return -1, nil } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go index 078137b7fd705..7e7aaa5463683 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. 
+ // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go index fd7d43a8907ba..d5c1085eeaecd 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer { // combination of unconditionally enabling TCP keepalives here, and // disabling the overriding of TCP keepalive parameters by setting the // KeepAlive field to a negative value above, results in OS defaults for - // the TCP keealive interval and time parameters. + // the TCP keepalive interval and time parameters. Control: func(_, _ string, c syscall.RawConn) error { return c.Control(func(fd uintptr) { windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 3deadfb4a20c9..ef72fbb3a0163 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -32,6 +32,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -148,9 +149,9 @@ type dataFrame struct { streamID uint32 endStream bool h []byte - d []byte + reader mem.Reader // onEachWrite is called every time - // a part of d is written out. + // a part of data is written out. onEachWrite func() } @@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream { } // controlBuffer is a way to pass information to loopy. -// Information is passed as specific struct types called control frames. -// A control frame not only represents data, messages or headers to be sent out -// but can also be used to instruct loopy to update its internal state. -// It shouldn't be confused with an HTTP2 frame, although some of the control frames -// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +// +// Information is passed as specific struct types called control frames. A +// control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. It +// shouldn't be confused with an HTTP2 frame, although some of the control +// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { - ch chan struct{} - done <-chan struct{} + wakeupCh chan struct{} // Unblocks readers waiting for something to read. + done <-chan struct{} // Closed when the transport is done. + + // Mutex guards all the fields below, except trfChan which can be read + // atomically without holding mu. mu sync.Mutex - consumerWaiting bool - list *itemList - err error + consumerWaiting bool // True when readers are blocked waiting for new data. + closed bool // True when the controlbuf is finished. + list *itemList // List of queued control frames. // transportResponseFrames counts the number of queued items that represent // the response of an action initiated by the peer. 
trfChan is created @@ -308,47 +313,59 @@ type controlBuffer struct { // closed and nilled when transportResponseFrames drops below the // threshold. Both fields are protected by mu. transportResponseFrames int - trfChan atomic.Value // chan struct{} + trfChan atomic.Pointer[chan struct{}] } func newControlBuffer(done <-chan struct{}) *controlBuffer { return &controlBuffer{ - ch: make(chan struct{}, 1), - list: &itemList{}, - done: done, + wakeupCh: make(chan struct{}, 1), + list: &itemList{}, + done: done, } } -// throttle blocks if there are too many incomingSettings/cleanupStreams in the -// controlbuf. +// throttle blocks if there are too many frames in the control buf that +// represent the response of an action initiated by the peer, like +// incomingSettings cleanupStreams etc. func (c *controlBuffer) throttle() { - ch, _ := c.trfChan.Load().(chan struct{}) - if ch != nil { + if ch := c.trfChan.Load(); ch != nil { select { - case <-ch: + case <-(*ch): case <-c.done: } } } +// put adds an item to the controlbuf. func (c *controlBuffer) put(it cbItem) error { _, err := c.executeAndPut(nil, it) return err } +// executeAndPut runs f, and if the return value is true, adds the given item to +// the controlbuf. The item could be nil, in which case, this method simply +// executes f and does not add the item to the controlbuf. +// +// The first return value indicates whether the item was successfully added to +// the control buffer. A non-nil error, specifically ErrConnClosing, is returned +// if the control buffer is already closed. func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { - var wakeUp bool c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err + defer c.mu.Unlock() + + if c.closed { + return false, ErrConnClosing } if f != nil { if !f() { // f wasn't successful - c.mu.Unlock() return false, nil } } + if it == nil { + return true, nil + } + + var wakeUp bool if c.consumerWaiting { wakeUp = true c.consumerWaiting = false @@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) { if c.transportResponseFrames == maxQueuedTransportResponseFrames { // We are adding the frame that puts us over the threshold; create // a throttling channel. - c.trfChan.Store(make(chan struct{})) + ch := make(chan struct{}) + c.trfChan.Store(&ch) } } - c.mu.Unlock() if wakeUp { select { - case c.ch <- struct{}{}: + case c.wakeupCh <- struct{}{}: default: } } return true, nil } -// Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return false, c.err - } - if !f(it) { // f wasn't successful - c.mu.Unlock() - return false, nil - } - c.mu.Unlock() - return true, nil -} - +// get returns the next control frame from the control buffer. If block is true +// **and** there are no control frames in the control buffer, the call blocks +// until one of the conditions is met: there is a frame to return or the +// transport is closed. func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() - if c.err != nil { + frame, err := c.getOnceLocked() + if frame != nil || err != nil || !block { + // If we read a frame or an error, we can return to the caller. The + // call to getOnceLocked() returns a nil frame and a nil error if + // there is nothing to read, and in that case, if the caller asked + // us not to block, we can return now as well. 
c.mu.Unlock() - return nil, c.err - } - if !c.list.isEmpty() { - h := c.list.dequeue().(cbItem) - if h.isTransportResponseFrame() { - if c.transportResponseFrames == maxQueuedTransportResponseFrames { - // We are removing the frame that put us over the - // threshold; close and clear the throttling channel. - ch := c.trfChan.Load().(chan struct{}) - close(ch) - c.trfChan.Store((chan struct{})(nil)) - } - c.transportResponseFrames-- - } - c.mu.Unlock() - return h, nil - } - if !block { - c.mu.Unlock() - return nil, nil + return frame, err } c.consumerWaiting = true c.mu.Unlock() + + // Release the lock above and wait to be woken up. select { - case <-c.ch: + case <-c.wakeupCh: case <-c.done: return nil, errors.New("transport closed by client") } } } +// Callers must not use this method, but should instead use get(). +// +// Caller must hold c.mu. +func (c *controlBuffer) getOnceLocked() (any, error) { + if c.closed { + return false, ErrConnClosing + } + if c.list.isEmpty() { + return nil, nil + } + h := c.list.dequeue().(cbItem) + if h.isTransportResponseFrame() { + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are removing the frame that put us over the + // threshold; close and clear the throttling channel. + ch := c.trfChan.Swap(nil) + close(*ch) + } + c.transportResponseFrames-- + } + return h, nil +} + +// finish closes the control buffer, cleaning up any streams that have queued +// header frames. Once this method returns, no more frames can be added to the +// control buffer, and attempts to do so will return ErrConnClosing. func (c *controlBuffer) finish() { c.mu.Lock() - if c.err != nil { - c.mu.Unlock() + defer c.mu.Unlock() + + if c.closed { return } - c.err = ErrConnClosing + c.closed = true // There may be headers for streams in the control buffer. // These streams need to be cleaned out since the transport // is still not aware of these yet. for head := c.list.dequeueAll(); head != nil; head = head.next { - hdr, ok := head.it.(*headerFrame) - if !ok { - continue - } - if hdr.onOrphaned != nil { // It will be nil on the server-side. - hdr.onOrphaned(ErrConnClosing) + switch v := head.it.(type) { + case *headerFrame: + if v.onOrphaned != nil { // It will be nil on the server-side. + v.onOrphaned(ErrConnClosing) + } + case *dataFrame: + _ = v.reader.Close() } } + // In case throttle() is currently in flight, it needs to be unblocked. // Otherwise, the transport may not close, since the transport is closed by // the reader encountering the connection error. - ch, _ := c.trfChan.Load().(chan struct{}) + ch := c.trfChan.Swap(nil) if ch != nil { - close(ch) + close(*ch) } - c.trfChan.Store((chan struct{})(nil)) - c.mu.Unlock() } type side int @@ -466,7 +487,7 @@ const ( // stream maintains a queue of data frames; as loopy receives data frames // it gets added to the queue of the relevant stream. // Loopy goes over this list of active streams by processing one node every iteration, -// thereby closely resemebling to a round-robin scheduling over all streams. While +// thereby closely resembling a round-robin scheduling over all streams. While // processing a stream, loopy writes out data bytes from this stream capped by the min // of http2MaxFrameLen, connection-level flow control and stream-level flow control. 
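In the rewritten finish() above, the control buffer flips a closed flag instead of storing an error, then drains its queue and releases per-item resources: orphaned header frames get their onOrphaned callback and queued data frames have their readers closed so pooled buffers are returned. Below is a minimal sketch of that close-once-and-release-everything shutdown pattern, using a plain slice of io.Closer rather than the transport's itemList; the names are illustrative only.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
)

var errClosing = errors.New("queue is closing")

// closableQueue accepts items until Finish is called; after that, Put fails and
// every item that was still queued has been released exactly once. This is a
// simplified stand-in, not the vendored controlBuffer.
type closableQueue struct {
	mu     sync.Mutex
	closed bool
	items  []io.Closer
}

func (q *closableQueue) Put(c io.Closer) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.closed {
		return errClosing
	}
	q.items = append(q.items, c)
	return nil
}

// Finish marks the queue closed and releases everything still queued, the way
// finish() closes the readers of undelivered data frames.
func (q *closableQueue) Finish() {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.closed {
		return
	}
	q.closed = true
	for _, c := range q.items {
		_ = c.Close()
	}
	q.items = nil
}

func main() {
	q := &closableQueue{}
	_ = q.Put(io.NopCloser(strings.NewReader("pending payload")))
	q.Finish()
	fmt.Println(q.Put(io.NopCloser(strings.NewReader("late")))) // queue is closing
}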
type loopyWriter struct { @@ -490,12 +511,13 @@ type loopyWriter struct { draining bool conn net.Conn logger *grpclog.PrefixLogger + bufferPool mem.BufferPool // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato conn: conn, logger: logger, ssGoAwayHandler: goAwayHandler, + bufferPool: bufferPool, } return l } @@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { // not be established yet. delete(l.estdStreams, c.streamID) str.deleteSelf() + for head := str.itl.dequeueAll(); head != nil; head = head.next { + if df, ok := head.it.(*dataFrame); ok { + _ = df.reader.Close() + } + } } if c.rst { // If RST_STREAM needs to be sent. if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { @@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) { dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. // A data item is represented by a dataFrame, since it later translates into // multiple HTTP2 data frames. - // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data. - // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the - // maximum possible HTTP2 frame size. + // Every dataFrame has two buffers; h that keeps grpc-message header and data + // that is the actual message. As an optimization to keep wire traffic low, data + // from data is copied to h to make as big as the maximum possible HTTP2 frame + // size. - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame + if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } str.itl.dequeue() // remove the empty data item from stream + _ = dataItem.reader.Close() if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. @@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - var ( - buf []byte - ) + // Figure out the maximum size we can send maxSize := http2MaxFrameLen if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. @@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) { } // Compute how much of the header and data we can send within quota and max frame length hSize := min(maxSize, len(dataItem.h)) - dSize := min(maxSize-hSize, len(dataItem.d)) - if hSize != 0 { - if dSize == 0 { - buf = dataItem.h - } else { - // We can add some data to grpc message header to distribute bytes more equally across frames. 
- // Copy on the stack to avoid generating garbage - var localBuf [http2MaxFrameLen]byte - copy(localBuf[:hSize], dataItem.h) - copy(localBuf[hSize:], dataItem.d[:dSize]) - buf = localBuf[:hSize+dSize] - } + dSize := min(maxSize-hSize, dataItem.reader.Remaining()) + remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize + size := hSize + dSize + + var buf *[]byte + + if hSize != 0 && dSize == 0 { + buf = &dataItem.h } else { - buf = dataItem.d - } + // Note: this is only necessary because the http2.Framer does not support + // partially writing a frame, so the sequence must be materialized into a buffer. + // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed. + pool := l.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + buf = pool.Get(size) + defer pool.Put(buf) - size := hSize + dSize + copy((*buf)[:hSize], dataItem.h) + _, _ = dataItem.reader.Read((*buf)[hSize:]) + } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool // If this is the last data message on this stream and all of it can be written in this iteration. - if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + if dataItem.endStream && remainingBytes == 0 { endStream = true } if dataItem.onEachWrite != nil { dataItem.onEachWrite() } - if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil { return false, err } str.bytesOutStanding += size l.sendQuota -= uint32(size) dataItem.h = dataItem.h[hSize:] - dataItem.d = dataItem.d[dSize:] - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + if remainingBytes == 0 { // All the data from that message was written out. + _ = dataItem.reader.Close() str.itl.dequeue() } if str.itl.isEmpty() { @@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) { } return false, nil } - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 4a3ddce29a4e7..ce878693bd741 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -24,7 +24,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -40,6 +39,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -50,7 +50,7 @@ import ( // NewServerHandlerTransport returns a ServerTransport handling gRPC from // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
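The processData change above borrows a buffer sized to hSize+dSize from a pool, copies the gRPC message header plus as much payload as quota allows into it, writes one DATA frame, and returns the buffer to the pool. The sketch below shows that coalescing step with a simplified sync.Pool-backed stand-in whose Get/Put shape only mimics the vendored pool interface; it is not that type. The builtin min is used, matching the removal of the local helper later in this file.

package main

import (
	"fmt"
	"sync"
)

// slicePool is a simplified stand-in for the buffer pool used by loopy (Get
// hands out a *[]byte of the requested length, Put returns it); it is not the
// vendored pool type.
type slicePool struct{ p sync.Pool }

func newSlicePool() *slicePool {
	return &slicePool{p: sync.Pool{New: func() any {
		b := make([]byte, 0, 16*1024)
		return &b
	}}}
}

func (s *slicePool) Get(size int) *[]byte {
	buf := s.p.Get().(*[]byte)
	if cap(*buf) < size {
		s.p.Put(buf)
		b := make([]byte, size)
		return &b
	}
	*buf = (*buf)[:size]
	return buf
}

func (s *slicePool) Put(buf *[]byte) { s.p.Put(buf) }

// coalesce copies the gRPC message header and as much payload as fits into a
// single pooled buffer, the way processData materializes one DATA frame.
func coalesce(pool *slicePool, hdr, payload []byte, maxFrame int) {
	hSize := min(maxFrame, len(hdr))
	dSize := min(maxFrame-hSize, len(payload))
	buf := pool.Get(hSize + dSize)
	defer pool.Put(buf)

	copy((*buf)[:hSize], hdr)
	copy((*buf)[hSize:], payload[:dSize])
	fmt.Printf("frame carries %d header + %d payload bytes\n", hSize, dSize)
}

func main() {
	pool := newSlicePool()
	coalesce(pool, []byte{0, 0, 0, 0, 5}, []byte("hello"), 16384)
}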
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) { if r.Method != http.MethodPost { w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) @@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentType: contentType, contentSubtype: contentSubtype, stats: stats, + bufferPool: bufferPool, } st.logger = prefixLoggerForServerHandlerTransport(st) @@ -171,6 +172,8 @@ type serverHandlerTransport struct { stats []stats.Handler logger *grpclog.PrefixLogger + + bufferPool mem.BufferPool } func (ht *serverHandlerTransport) Close(err error) { @@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } s.hdrMu.Lock() + defer s.hdrMu.Unlock() if p := st.Proto(); p != nil && len(p.Details) > 0 { delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) @@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } - s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { s.hdrMu.Unlock() } -func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + // Always take a reference because otherwise there is no guarantee the data will + // be available after this function returns. This is what callers to Write + // expect. + data.Ref() headersWritten := s.updateHeaderSent() - return ht.do(func() { + err := ht.do(func() { + defer data.Free() if !headersWritten { ht.writePendingHeaders(s) } ht.rw.Write(hdr) - ht.rw.Write(data) + for _, b := range data { + _, _ = ht.rw.Write(b.ReadOnlyData()) + } ht.rw.(http.Flusher).Flush() }) + if err != nil { + data.Free() + return err + } + return nil } func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { @@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ - reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, windowHandler: func(int) {}, } @@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream go func() { defer close(readerDone) - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) + for { + buf := ht.bufferPool.Get(http2MaxFrameLen) + n, err := req.Body.Read(*buf) if n > 0 { - s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) - buf = buf[n:] + *buf = (*buf)[:n] + s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)}) + } else { + ht.bufferPool.Put(buf) } if err != nil { s.buf.put(recvMsg{err: mapRecvMsgError(err)}) return } - if len(buf) == 0 { - buf = make([]byte, readSize) - } } }() @@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain(debugData string) { +func (ht *serverHandlerTransport) Drain(string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 3c63c706986da..c769deab53c77 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -47,6 +47,7 @@ import ( isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -59,6 +60,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var goAwayLoopyWriterTimeout = 5 * time.Second + var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) // http2Client implements the ClientTransport interface with HTTP2. @@ -144,7 +147,7 @@ type http2Client struct { onClose func(GoAwayReason) - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 logger *grpclog.PrefixLogger @@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } }(conn) - // The following defer and goroutine monitor the connectCtx for cancelation + // The following defer and goroutine monitor the connectCtx for cancellation // and deadline. On context expiration, the connection is hard closed and // this function will naturally fail as a result. 
Otherwise, the defer // waits for the goroutine to exit to prevent the context from being @@ -346,7 +349,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), keepaliveEnabled: keepaliveEnabled, - bufferPool: newBufferPool(), + bufferPool: opts.BufferPool, onClose: onClose, } var czSecurity credentials.ChannelzSecurityValue @@ -463,7 +466,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) if err := t.loopy.run(); !isIOError(err) { // Immediately close the connection, as the loopy writer returns // when there are no more active streams and we were draining (the @@ -504,7 +507,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { closeStream: func(err error) { t.CloseStream(s, err) }, - freeBuffer: t.bufferPool.put, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -770,7 +772,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, hdr := &headerFrame{ hf: headerFields, endStream: false, - initStream: func(id uint32) error { + initStream: func(uint32) error { t.mu.Lock() // TODO: handle transport closure in loopy instead and remove this // initStream is never called when transport is draining. @@ -983,6 +985,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // only once on a transport. Once it is called, the transport should not be // accessed anymore. func (t *http2Client) Close(err error) { + t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10)) t.mu.Lock() // Make sure we only close once. if t.state == closing { @@ -1006,10 +1009,20 @@ func (t *http2Client) Close(err error) { t.kpDormancyCond.Signal() } t.mu.Unlock() + // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the - // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. + // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It + // also waits for loopyWriter to be closed with a timer to avoid the + // long blocking in case the connection is blackholed, i.e. TCP is + // just stuck. t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err}) - <-t.writerDone + timer := time.NewTimer(goAwayLoopyWriterTimeout) + defer timer.Stop() + select { + case <-t.writerDone: // success + case <-timer.C: + t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout) + } t.cancel() t.conn.Close() channelz.RemoveEntry(t.channelz.ID) @@ -1065,27 +1078,36 @@ func (t *http2Client) GracefulClose() { // Write formats the data into HTTP2 data frame(s) and sends it out. The caller // should proceed only if Write returns nil. -func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error { + reader := data.Reader() + if opts.Last { // If it's the last message, update stream state. 
if !s.compareAndSwapState(streamActive, streamWriteDone) { + _ = reader.Close() return errStreamDone } } else if s.getState() != streamActive { + _ = reader.Close() return errStreamDone } df := &dataFrame{ streamID: s.id, endStream: opts.Last, h: hdr, - d: data, + reader: reader, } - if hdr != nil || data != nil { // If it's not an empty data frame, check quota. - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return err } } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } func (t *http2Client) getStream(f http2.Frame) *Stream { @@ -1190,10 +1212,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } // The server has closed the stream without sending trailers. Record that @@ -1222,7 +1247,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { if statusCode == codes.Canceled { if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { // Our deadline was already exceeded, and that was likely the cause - // of this cancelation. Alter the status code accordingly. + // of this cancellation. Alter the status code accordingly. statusCode = codes.DeadlineExceeded } } @@ -1307,7 +1332,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { id := f.LastStreamID if id > 0 && id%2 == 0 { t.mu.Unlock() - t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id)) + t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)) return } // A client can receive multiple GoAways from the server (see @@ -1642,11 +1667,10 @@ func (t *http2Client) reader(errCh chan<- error) { t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue - } else { - // Transport error. - t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) - return } + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: @@ -1671,13 +1695,6 @@ func (t *http2Client) reader(errCh chan<- error) { } } -func minTime(a, b time.Duration) time.Duration { - if a < b { - return a - } - return b -} - // keepalive running in a separate goroutine makes sure the connection is alive by sending pings. func (t *http2Client) keepalive() { p := &ping{data: [8]byte{}} @@ -1745,7 +1762,7 @@ func (t *http2Client) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). 
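The client Close path in the hunk above now bounds the wait for the loopy writer with a timer, so a blackholed connection cannot stall shutdown while the final GOAWAY is being flushed. Here is a small, self-contained sketch of that wait-with-deadline pattern; the function, channel, and duration names are illustrative, not the vendored ones.

package main

import (
	"fmt"
	"time"
)

// waitWithTimeout gives a background goroutine a bounded amount of time to
// finish (here, flushing a final frame) before the caller gives up and tears
// the connection down anyway.
func waitWithTimeout(done <-chan struct{}, limit time.Duration) bool {
	timer := time.NewTimer(limit)
	defer timer.Stop()
	select {
	case <-done:
		return true // writer exited in time
	case <-timer.C:
		return false // give up and close the transport
	}
}

func main() {
	writerDone := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend to flush a GOAWAY
		close(writerDone)
	}()
	fmt.Println("writer finished in time:", waitWithTimeout(writerDone, time.Second))
}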
- sleepDuration := minTime(t.kp.Time, timeoutLeft) + sleepDuration := min(t.kp.Time, timeoutLeft) timeoutLeft -= sleepDuration timer.Reset(sleepDuration) case <-t.ctx.Done(): diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index b7091165b5013..584b50fe55302 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -39,6 +39,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" + "google.golang.org/grpc/mem" "google.golang.org/protobuf/proto" "google.golang.org/grpc/codes" @@ -119,7 +120,7 @@ type http2Server struct { // Fields below are for channelz metric collection. channelz *channelz.Socket - bufferPool *bufferPool + bufferPool mem.BufferPool connectionID uint64 @@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - bufferPool: newBufferPool(), + bufferPool: config.BufferPool, } var czSecurity credentials.ChannelzSecurityValue if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { @@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool) err := t.loopy.run() close(t.loopyWriterDone) if !isIOError(err) { @@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ reader: &recvBufferReader{ - ctx: s.ctx, - ctxDone: s.ctxDone, - recv: s.buf, - freeBuffer: t.bufferPool.put, + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, }, windowHandler: func(n int) { t.updateWindow(s, uint32(n)) @@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) { // guarantee f.Data() is consumed before the arrival of next frame. // Can this copy be eliminated? if len(f.Data()) > 0 { - buffer := t.bufferPool.get() - buffer.Reset() - buffer.Write(f.Data()) - s.write(recvMsg{buffer: buffer}) + pool := t.bufferPool + if pool == nil { + // Note that this is only supposed to be nil in tests. Otherwise, stream is + // always initialized with a BufferPool. + pool = mem.DefaultBufferPool() + } + s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)}) } } if f.StreamEnded() { @@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { onWrite: t.setResetPingStrikes, } - success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + success, err := t.controlBuf.executeAndPut(func() bool { + return t.checkForHeaderListSize(trailingHeader) + }, nil) if !success { if err != nil { return err @@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { // Write converts the data into HTTP2 data frame and sends it out. Non-nil error // is returns if it fails (e.g., framing error, transport error). 
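Both handleData hunks copy f.Data() into a pooled buffer because the HTTP/2 framer reuses its read buffer, so the slice is only valid until the next frame is read. The toy source below imitates that reuse to show why the copy is needed; it is a standalone illustration, not the framer's actual buffering.

package main

import "fmt"

// frameSource reuses one internal scratch buffer for every frame it returns,
// imitating the constraint on f.Data(): the slice is only valid until the next
// frame is read.
type frameSource struct {
	scratch []byte
	frames  [][]byte
}

func newFrameSource(frames ...string) *frameSource {
	fs := &frameSource{scratch: make([]byte, 0, 64)}
	for _, f := range frames {
		fs.frames = append(fs.frames, []byte(f))
	}
	return fs
}

func (s *frameSource) next() []byte {
	s.scratch = append(s.scratch[:0], s.frames[0]...)
	s.frames = s.frames[1:]
	return s.scratch
}

func main() {
	src := newFrameSource("first", "second")

	aliased := src.next()                     // still points into the shared scratch buffer
	copied := append([]byte(nil), aliased...) // what handleData does, via a pooled copy

	src.next() // reading the next frame overwrites the scratch buffer

	fmt.Printf("copied=%q aliased=%q\n", copied, aliased) // copied="first" aliased="secon"
}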
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { +func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error { + reader := data.Reader() + if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { + _ = reader.Close() return err } } else { // Writing headers checks for this condition. if s.getState() == streamDone { + _ = reader.Close() return t.streamContextErr(s) } } + df := &dataFrame{ streamID: s.id, h: hdr, - d: data, + reader: reader, onEachWrite: t.setResetPingStrikes, } - if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil { + _ = reader.Close() return t.streamContextErr(s) } - return t.controlBuf.put(df) + if err := t.controlBuf.put(df); err != nil { + _ = reader.Close() + return err + } + return nil } // keepalive running in a separate goroutine does the following: @@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() { // timeoutLeft. This will ensure that we wait only for kp.Time // before sending out the next ping (for cases where the ping is // acked). - sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + sleepDuration := min(t.kp.Time, kpTimeoutLeft) kpTimeoutLeft -= sleepDuration kpTimer.Reset(sleepDuration) case <-t.done: diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 39cef3bd442eb..3613d7b64817d 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { return w } -func (w *bufWriter) Write(b []byte) (n int, err error) { +func (w *bufWriter) Write(b []byte) (int, error) { if w.err != nil { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. - n, err = w.conn.Write(b) + n, err := w.conn.Write(b) return n, toIOError(err) } if w.buf == nil { b := w.pool.Get().(*[]byte) w.buf = *b } + written := 0 for len(b) > 0 { - nn := copy(w.buf[w.offset:], b) - b = b[nn:] - w.offset += nn - n += nn - if w.offset >= w.batchSize { - err = w.flushKeepBuffer() + copied := copy(w.buf[w.offset:], b) + b = b[copied:] + written += copied + w.offset += copied + if w.offset < w.batchSize { + continue + } + if err := w.flushKeepBuffer(); err != nil { + return written, err } } - return n, err + return written, nil } func (w *bufWriter) Flush() error { @@ -389,7 +393,7 @@ type framer struct { fr *http2.Framer } -var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferPoolMap = make(map[int]*sync.Pool) var writeBufferMutex sync.Mutex func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go index 24fa1032574cb..54b2244365444 100644 --- a/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri } return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) } - - return &bufConn{Conn: conn, r: r}, nil + // The buffer could contain extra bytes from the target server, so we can't + // discard it. 
However, in many cases where the server waits for the client + // to send the first message (e.g. when TLS is being used), the buffer will + // be empty, so we can avoid the overhead of reading through this buffer. + if r.Buffered() != 0 { + return &bufConn{Conn: conn, r: r}, nil + } + return conn, nil } // proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 4b39c0ade97c0..fdd6fa86cc15c 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -22,7 +22,6 @@ package transport import ( - "bytes" "context" "errors" "fmt" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" @@ -47,32 +47,10 @@ import ( const logLevel = 2 -type bufferPool struct { - pool sync.Pool -} - -func newBufferPool() *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() any { - return new(bytes.Buffer) - }, - }, - } -} - -func (p *bufferPool) get() *bytes.Buffer { - return p.pool.Get().(*bytes.Buffer) -} - -func (p *bufferPool) put(b *bytes.Buffer) { - p.pool.Put(b) -} - // recvMsg represents the received msg from the transport. All transport // protocol specific info has been removed. type recvMsg struct { - buffer *bytes.Buffer + buffer mem.Buffer // nil: received some data // io.EOF: stream is completed. data is nil. // other non-nil error: transport failure. data is nil. @@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer { func (b *recvBuffer) put(r recvMsg) { b.mu.Lock() if b.err != nil { + // drop the buffer on the floor. Since b.err is not nil, any subsequent reads + // will always return an error, making this buffer inaccessible. + r.buffer.Free() b.mu.Unlock() // An error had occurred earlier, don't accept more // data or errors. @@ -148,45 +129,97 @@ type recvBufferReader struct { ctx context.Context ctxDone <-chan struct{} // cache of ctx.Done() (for performance). recv *recvBuffer - last *bytes.Buffer // Stores the remaining data in the previous calls. + last mem.Buffer // Stores the remaining data in the previous calls. err error - freeBuffer func(*bytes.Buffer) } -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { +func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) { if r.err != nil { return 0, r.err } if r.last != nil { - // Read remaining data left in last call. - copied, _ := r.last.Read(p) - if r.last.Len() == 0 { - r.freeBuffer(r.last) + n, r.last = mem.ReadUnsafe(header, r.last) + return n, nil + } + if r.closeStream != nil { + n, r.err = r.readHeaderClient(header) + } else { + n, r.err = r.readHeader(header) + } + return n, r.err +} + +// Read reads the next n bytes from last. If last is drained, it tries to read +// additional data from recv. It blocks if there no additional data available in +// recv. If Read returns any non-nil error, it will continue to return that +// error. 
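The proxy hunk above keeps the bufio-wrapped connection only when the handshake reader still holds unread bytes; otherwise the raw net.Conn is returned and the wrapper's per-read overhead is avoided. Below is a minimal sketch of that decision, using a strings.Reader in place of a real connection; everything here is illustrative rather than the vendored code.

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// unwrapIfDrained keeps the buffering wrapper only when the handshake reader
// still holds bytes that would otherwise be lost; a strings.Reader stands in
// for the real net.Conn here.
func unwrapIfDrained(raw io.Reader, r *bufio.Reader) io.Reader {
	if r.Buffered() != 0 {
		return r // leftover bytes from the target must not be discarded
	}
	return raw
}

func main() {
	raw := strings.NewReader("HTTP/1.1 200 OK\r\n\r\nearly-data")
	br := bufio.NewReader(raw)

	// Pretend the CONNECT handshake consumed the status line and the blank line.
	if _, err := br.ReadString('\n'); err != nil {
		panic(err)
	}
	if _, err := br.ReadString('\n'); err != nil {
		panic(err)
	}

	r := unwrapIfDrained(raw, br)
	_, kept := r.(*bufio.Reader)
	fmt.Printf("kept buffering wrapper: %v (buffered=%d bytes)\n", kept, br.Buffered())
}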
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) { + if r.err != nil { + return nil, r.err + } + if r.last != nil { + buf = r.last + if r.last.Len() > n { + buf, r.last = mem.SplitUnsafe(buf, n) + } else { r.last = nil } - return copied, nil + return buf, nil } if r.closeStream != nil { - n, r.err = r.readClient(p) + buf, r.err = r.readClient(n) } else { - n, r.err = r.read(p) + buf, r.err = r.read(n) } - return n, r.err + return buf, r.err } -func (r *recvBufferReader) read(p []byte) (n int, err error) { +func (r *recvBufferReader) readHeader(header []byte) (n int, err error) { select { case <-r.ctxDone: return 0, ContextErr(r.ctx.Err()) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readHeaderAdditional(m, header) + } +} + +func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) { + select { + case <-r.ctxDone: + return nil, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + return r.readAdditional(m, n) + } +} + +func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) { + // If the context is canceled, then closes the stream with nil metadata. + // closeStream writes its error parameter to r.recv as a recvMsg. + // r.readAdditional acts on that message and returns the necessary error. + select { + case <-r.ctxDone: + // Note that this adds the ctx error to the end of recv buffer, and + // reads from the head. This will delay the error until recv buffer is + // empty, thus will delay ctx cancellation in Recv(). + // + // It's done this way to fix a race between ctx cancel and trailer. The + // race was, stream.Recv() may return ctx error if ctxDone wins the + // race, but stream.Trailer() may return a non-nil md because the stream + // was not marked as done when trailer is received. This closeStream + // call will mark stream as done, thus fix the race. + // + // TODO: delaying ctx error seems like a unnecessary side effect. What + // we really want is to mark the stream as done, and return ctx error + // faster. + r.closeStream(ContextErr(r.ctx.Err())) + m := <-r.recv.get() + return r.readHeaderAdditional(m, header) + case m := <-r.recv.get(): + return r.readHeaderAdditional(m, header) } } -func (r *recvBufferReader) readClient(p []byte) (n int, err error) { +func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) { // If the context is canceled, then closes the stream with nil metadata. // closeStream writes its error parameter to r.recv as a recvMsg. // r.readAdditional acts on that message and returns the necessary error. @@ -207,25 +240,40 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) { // faster. 
r.closeStream(ContextErr(r.ctx.Err())) m := <-r.recv.get() - return r.readAdditional(m, p) + return r.readAdditional(m, n) case m := <-r.recv.get(): - return r.readAdditional(m, p) + return r.readAdditional(m, n) } } -func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) { +func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) { r.recv.load() if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } return 0, m.err } - copied, _ := m.buffer.Read(p) - if m.buffer.Len() == 0 { - r.freeBuffer(m.buffer) - r.last = nil - } else { - r.last = m.buffer + + n, r.last = mem.ReadUnsafe(header, m.buffer) + + return n, nil +} + +func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) { + r.recv.load() + if m.err != nil { + if m.buffer != nil { + m.buffer.Free() + } + return nil, m.err + } + + if m.buffer.Len() > n { + m.buffer, r.last = mem.SplitUnsafe(m.buffer, n) } - return copied, nil + + return m.buffer, nil } type streamState uint32 @@ -241,7 +289,7 @@ const ( type Stream struct { id uint32 st ServerTransport // nil for client side Stream - ct *http2Client // nil for server side Stream + ct ClientTransport // nil for server side Stream ctx context.Context // the associated context of the stream cancel context.CancelFunc // always nil for client side Stream done chan struct{} // closed at the end of stream to unblock writers. On the client side. @@ -251,7 +299,7 @@ type Stream struct { recvCompress string sendCompress string buf *recvBuffer - trReader io.Reader + trReader *transportReader fc *inFlow wq *writeQuota @@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool { return s.noHeaders } -// Trailer returns the cached trailer metedata. Note that if it is not called +// Trailer returns the cached trailer metadata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. // It can be safely read only after stream has ended that is either read @@ -499,36 +547,87 @@ func (s *Stream) write(m recvMsg) { s.buf.put(m) } -// Read reads all p bytes from the wire for this stream. -func (s *Stream) Read(p []byte) (n int, err error) { +func (s *Stream) ReadHeader(header []byte) (err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.er; er != nil { + return er + } + s.requestRead(len(header)) + for len(header) != 0 { + n, err := s.trReader.ReadHeader(header) + header = header[n:] + if len(header) == 0 { + err = nil + } + if err != nil { + if n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + } + return nil +} + +// Read reads n bytes from the wire for this stream. 
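The recvBufferReader rewrite above returns at most n bytes per call, splitting the current buffer and stashing the remainder in last for the next call (via mem.SplitUnsafe in the vendored code). The sketch below mirrors only that bookkeeping with plain byte slices, so the splitting logic is easy to follow; it is not the vendored reader.

package main

import "fmt"

// chunkReader hands out at most n bytes per call from a queue of received
// chunks, keeping any remainder of the current chunk for the next call.
type chunkReader struct {
	last  []byte
	queue [][]byte
}

func (r *chunkReader) read(n int) []byte {
	if r.last == nil {
		if len(r.queue) == 0 {
			return nil
		}
		r.last, r.queue = r.queue[0], r.queue[1:]
	}
	buf := r.last
	if len(buf) > n {
		buf, r.last = buf[:n], buf[n:]
	} else {
		r.last = nil
	}
	return buf
}

func main() {
	r := &chunkReader{queue: [][]byte{[]byte("hello world"), []byte("!")}}
	for {
		b := r.read(5)
		if b == nil {
			break
		}
		fmt.Printf("%q\n", b) // "hello", " worl", "d", "!"
	}
}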
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) { // Don't request a read if there was an error earlier - if er := s.trReader.(*transportReader).er; er != nil { - return 0, er + if er := s.trReader.er; er != nil { + return nil, er } - s.requestRead(len(p)) - return io.ReadFull(s.trReader, p) + s.requestRead(n) + for n != 0 { + buf, err := s.trReader.Read(n) + var bufLen int + if buf != nil { + bufLen = buf.Len() + } + n -= bufLen + if n == 0 { + err = nil + } + if err != nil { + if bufLen > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + data.Free() + return nil, err + } + data = append(data, buf) + } + return data, nil } -// tranportReader reads all the data available for this Stream from the transport and +// transportReader reads all the data available for this Stream from the transport and // passes them into the decoder, which converts them into a gRPC message stream. // The error is io.EOF when the stream is done or another non-nil error if // the stream broke. type transportReader struct { - reader io.Reader + reader *recvBufferReader // The handler to control the window update procedure for both this // particular stream and the associated transport. windowHandler func(int) er error } -func (t *transportReader) Read(p []byte) (n int, err error) { - n, err = t.reader.Read(p) +func (t *transportReader) ReadHeader(header []byte) (int, error) { + n, err := t.reader.ReadHeader(header) if err != nil { t.er = err - return + return 0, err + } + t.windowHandler(len(header)) + return n, nil +} + +func (t *transportReader) Read(n int) (mem.Buffer, error) { + buf, err := t.reader.Read(n) + if err != nil { + t.er = err + return buf, err } - t.windowHandler(n) - return + t.windowHandler(buf.Len()) + return buf, nil } // BytesReceived indicates whether any bytes have been received on this stream. @@ -574,6 +673,7 @@ type ServerConfig struct { ChannelzParent *channelz.Server MaxHeaderListSize *uint32 HeaderTableSize *uint32 + BufferPool mem.BufferPool } // ConnectOptions covers all relevant options for communicating with the server. @@ -612,6 +712,8 @@ type ConnectOptions struct { MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. UseProxy bool + // The mem.BufferPool to use when reading/writing to the wire. + BufferPool mem.BufferPool } // NewClientTransport establishes the transport with the required ConnectOptions @@ -673,7 +775,7 @@ type ClientTransport interface { // Write sends the data for the given stream. A nil stream indicates // the write is to be performed on the transport as a whole. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // NewStream creates a Stream for an RPC. NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) @@ -725,7 +827,7 @@ type ServerTransport interface { // Write sends the data for the given stream. // Write may not be called on all streams. - Write(s *Stream, hdr []byte, data []byte, opts *Options) error + Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error // WriteStatus sends the status of a stream to the client. WriteStatus is // the final call made on a stream and always occurs. @@ -798,7 +900,7 @@ var ( // connection is draining. This could be caused by goaway or balancer // removing the address. 
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") - // errStreamDone is returned from write at the client side to indiacte application + // errStreamDone is returned from write at the client side to indicate application // layer of an error. errStreamDone = errors.New("the stream is done") // StatusGoAway indicates that the server sent a GOAWAY that included this diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go index b8b92a6cb550e..8317859e1e95f 100644 --- a/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/bootstrap.go @@ -24,35 +24,29 @@ import ( "bytes" "encoding/json" "fmt" + "maps" "net/url" "os" + "slices" "strings" + "sync" "google.golang.org/grpc" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/bootstrap" - "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/structpb" v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3" ) const ( - // The "server_features" field in the bootstrap file contains a list of - // features supported by the server: - // - A value of "xds_v3" indicates that the server supports the v3 version of - // the xDS transport protocol. - // - A value of "ignore_resource_deletion" indicates that the client should - // ignore deletion of Listener and Cluster resources in updates from the - // server. - serverFeaturesV3 = "xds_v3" serverFeaturesIgnoreResourceDeletion = "ignore_resource_deletion" - - gRPCUserAgentName = "gRPC Go" - clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" - clientFeatureResourceWrapper = "xds.config.resource-in-sotw" + gRPCUserAgentName = "gRPC Go" + clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" + clientFeatureResourceWrapper = "xds.config.resource-in-sotw" ) // For overriding in unit tests. @@ -60,12 +54,15 @@ var bootstrapFileReadFunc = os.ReadFile // ChannelCreds contains the credentials to be used while communicating with an // xDS server. It is also used to dedup servers with the same server URI. +// +// This type does not implement custom JSON marshal/unmarshal logic because it +// is straightforward to accomplish the same with json struct tags. type ChannelCreds struct { // Type contains a unique name identifying the credentials type. The only // supported types currently are "google_default" and "insecure". - Type string + Type string `json:"type,omitempty"` // Config contains the JSON configuration associated with the credentials. - Config json.RawMessage + Config json.RawMessage `json:"config,omitempty"` } // Equal reports whether cc and other are considered equal. @@ -87,92 +84,208 @@ func (cc ChannelCreds) String() string { return cc.Type + "-" + string(b) } -// ServerConfig contains the configuration to connect to a server, including -// URI, creds, and transport API version (e.g. v2 or v3). +// ServerConfigs represents a collection of server configurations. +type ServerConfigs []*ServerConfig + +// Equal returns true if scs equals other. 
+func (scs *ServerConfigs) Equal(other *ServerConfigs) bool { + if len(*scs) != len(*other) { + return false + } + for i := range *scs { + if !(*scs)[i].Equal((*other)[i]) { + return false + } + } + return true +} + +// UnmarshalJSON takes the json data (a list of server configurations) and +// unmarshals it to the struct. +func (scs *ServerConfigs) UnmarshalJSON(data []byte) error { + servers := []*ServerConfig{} + if err := json.Unmarshal(data, &servers); err != nil { + return fmt.Errorf("xds: failed to JSON unmarshal server configurations during bootstrap: %v, config:\n%s", err, string(data)) + } + // Only use the first server config if fallback support is disabled. + if !envconfig.XDSFallbackSupport { + if len(servers) > 1 { + servers = servers[:1] + } + } + *scs = servers + return nil +} + +// String returns a string representation of the ServerConfigs, by concatenating +// the string representations of the underlying server configs. +func (scs *ServerConfigs) String() string { + ret := "" + for i, sc := range *scs { + if i > 0 { + ret += ", " + } + ret += sc.String() + } + return ret +} + +// Authority contains configuration for an xDS control plane authority. // -// It contains unexported fields that are initialized when unmarshaled from JSON -// using either the UnmarshalJSON() method or the ServerConfigFromJSON() -// function. Hence users are strongly encouraged not to use a literal struct -// initialization to create an instance of this type, but instead unmarshal from -// JSON using one of the two available options. -type ServerConfig struct { - // ServerURI is the management server to connect to. +// This type does not implement custom JSON marshal/unmarshal logic because it +// is straightforward to accomplish the same with json struct tags. +type Authority struct { + // ClientListenerResourceNameTemplate is template for the name of the + // Listener resource to subscribe to for a gRPC client channel. Used only + // when the channel is created using an "xds:" URI with this authority name. // - // The bootstrap file contains an ordered list of xDS servers to contact for - // this authority. The first one is picked. - ServerURI string - // Creds contains the credentials to be used while communicationg with this - // xDS server. It is also used to dedup servers with the same server URI. - Creds ChannelCreds - // ServerFeatures contains a list of features supported by this xDS server. - // It is also used to dedup servers with the same server URI and creds. - ServerFeatures []string + // The token "%s", if present in this string, will be replaced + // with %-encoded service authority (i.e., the path part of the target + // URI used to create the gRPC channel). + // + // Must start with "xdstp:///". If it does not, + // that is considered a bootstrap file parsing error. + // + // If not present in the bootstrap file, defaults to + // "xdstp:///envoy.config.listener.v3.Listener/%s". + ClientListenerResourceNameTemplate string `json:"client_listener_resource_name_template,omitempty"` + // XDSServers contains the list of server configurations for this authority. + XDSServers ServerConfigs `json:"xds_servers,omitempty"` +} + +// Equal returns true if a equals other. 
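The json struct tags introduced above define the bootstrap wire format for authorities and their server list. The snippet below round-trips a hand-written sample through local mirror structs that use the same tags; the URIs and credential values are made up for illustration, and these mirror types are not the vendored ones.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the bootstrap JSON schema; the field tags follow the hunk
// above, while the sample values below are invented.
type channelCreds struct {
	Type   string          `json:"type,omitempty"`
	Config json.RawMessage `json:"config,omitempty"`
}

type serverConfig struct {
	ServerURI      string         `json:"server_uri,omitempty"`
	ChannelCreds   []channelCreds `json:"channel_creds,omitempty"`
	ServerFeatures []string       `json:"server_features,omitempty"`
}

type authority struct {
	ClientListenerResourceNameTemplate string         `json:"client_listener_resource_name_template,omitempty"`
	XDSServers                         []serverConfig `json:"xds_servers,omitempty"`
}

const sample = `{
  "client_listener_resource_name_template": "xdstp://example.org/envoy.config.listener.v3.Listener/%s",
  "xds_servers": [
    {
      "server_uri": "xds.example.org:443",
      "channel_creds": [{"type": "insecure"}],
      "server_features": ["ignore_resource_deletion"]
    }
  ]
}`

func main() {
	var a authority
	if err := json.Unmarshal([]byte(sample), &a); err != nil {
		panic(err)
	}
	fmt.Println(a.XDSServers[0].ServerURI, a.XDSServers[0].ServerFeatures)
}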
+func (a *Authority) Equal(other *Authority) bool { + switch { + case a == nil && other == nil: + return true + case (a != nil) != (other != nil): + return false + case a.ClientListenerResourceNameTemplate != other.ClientListenerResourceNameTemplate: + return false + case !a.XDSServers.Equal(&other.XDSServers): + return false + } + return true +} + +// ServerConfig contains the configuration to connect to a server. +type ServerConfig struct { + serverURI string + channelCreds []ChannelCreds + serverFeatures []string // As part of unmarshalling the JSON config into this struct, we ensure that // the credentials config is valid by building an instance of the specified - // credentials and store it here as a grpc.DialOption for easy access when - // dialing this xDS server. + // credentials and store it here for easy access. + selectedCreds ChannelCreds credsDialOption grpc.DialOption - // IgnoreResourceDeletion controls the behavior of the xDS client when the - // server deletes a previously sent Listener or Cluster resource. If set, the - // xDS client will not invoke the watchers' OnResourceDoesNotExist() method - // when a resource is deleted, nor will it remove the existing resource value - // from its cache. - IgnoreResourceDeletion bool + cleanups []func() +} - // Cleanups are called when the xDS client for this server is closed. Allows - // cleaning up resources created specifically for this ServerConfig. - Cleanups []func() +// ServerURI returns the URI of the management server to connect to. +func (sc *ServerConfig) ServerURI() string { + return sc.serverURI } -// CredsDialOption returns the configured credentials as a grpc dial option. +// ChannelCreds returns the credentials configuration to use when communicating +// with this server. Also used to dedup servers with the same server URI. +func (sc *ServerConfig) ChannelCreds() []ChannelCreds { + return sc.channelCreds +} + +// ServerFeatures returns the list of features supported by this server. Also +// used to dedup servers with the same server URI and channel creds. +func (sc *ServerConfig) ServerFeatures() []string { + return sc.serverFeatures +} + +// ServerFeaturesIgnoreResourceDeletion returns true if this server supports a +// feature where the xDS client can ignore resource deletions from this server, +// as described in gRFC A53. +// +// This feature controls the behavior of the xDS client when the server deletes +// a previously sent Listener or Cluster resource. If set, the xDS client will +// not invoke the watchers' OnResourceDoesNotExist() method when a resource is +// deleted, nor will it remove the existing resource value from its cache. +func (sc *ServerConfig) ServerFeaturesIgnoreResourceDeletion() bool { + for _, sf := range sc.serverFeatures { + if sf == serverFeaturesIgnoreResourceDeletion { + return true + } + } + return false +} + +// CredsDialOption returns the first supported transport credentials from the +// configuration, as a dial option. func (sc *ServerConfig) CredsDialOption() grpc.DialOption { return sc.credsDialOption } +// Cleanups returns a collection of functions to be called when the xDS client +// for this server is closed. Allows cleaning up resources created specifically +// for this server. +func (sc *ServerConfig) Cleanups() []func() { + return sc.cleanups +} + +// Equal reports whether sc and other are considered equal. 
+func (sc *ServerConfig) Equal(other *ServerConfig) bool { + switch { + case sc == nil && other == nil: + return true + case (sc != nil) != (other != nil): + return false + case sc.serverURI != other.serverURI: + return false + case !slices.EqualFunc(sc.channelCreds, other.channelCreds, func(a, b ChannelCreds) bool { return a.Equal(b) }): + return false + case !slices.Equal(sc.serverFeatures, other.serverFeatures): + return false + case !sc.selectedCreds.Equal(other.selectedCreds): + return false + } + return true +} + // String returns the string representation of the ServerConfig. -// -// This string representation will be used as map keys in federation -// (`map[ServerConfig]authority`), so that the xDS ClientConn and stream will be -// shared by authorities with different names but the same server config. -// -// It covers (almost) all the fields so the string can represent the config -// content. It doesn't cover NodeProto because NodeProto isn't used by -// federation. func (sc *ServerConfig) String() string { - features := strings.Join(sc.ServerFeatures, "-") - return strings.Join([]string{sc.ServerURI, sc.Creds.String(), features}, "-") + if len(sc.serverFeatures) == 0 { + return fmt.Sprintf("%s-%s", sc.serverURI, sc.selectedCreds.String()) + } + features := strings.Join(sc.serverFeatures, "-") + return strings.Join([]string{sc.serverURI, sc.selectedCreds.String(), features}, "-") } -// MarshalJSON marshals the ServerConfig to json. -func (sc ServerConfig) MarshalJSON() ([]byte, error) { - server := xdsServer{ - ServerURI: sc.ServerURI, - ChannelCreds: []channelCreds{{Type: sc.Creds.Type, Config: sc.Creds.Config}}, - ServerFeatures: sc.ServerFeatures, - } - server.ServerFeatures = []string{serverFeaturesV3} - if sc.IgnoreResourceDeletion { - server.ServerFeatures = append(server.ServerFeatures, serverFeaturesIgnoreResourceDeletion) +// The following fields correspond 1:1 with the JSON schema for ServerConfig. +type serverConfigJSON struct { + ServerURI string `json:"server_uri,omitempty"` + ChannelCreds []ChannelCreds `json:"channel_creds,omitempty"` + ServerFeatures []string `json:"server_features,omitempty"` +} + +// MarshalJSON returns marshaled JSON bytes corresponding to this server config. +func (sc *ServerConfig) MarshalJSON() ([]byte, error) { + server := &serverConfigJSON{ + ServerURI: sc.serverURI, + ChannelCreds: sc.channelCreds, + ServerFeatures: sc.serverFeatures, } return json.Marshal(server) } // UnmarshalJSON takes the json data (a server) and unmarshals it to the struct. func (sc *ServerConfig) UnmarshalJSON(data []byte) error { - var server xdsServer + server := serverConfigJSON{} if err := json.Unmarshal(data, &server); err != nil { - return fmt.Errorf("xds: json.Unmarshal(data) for field ServerConfig failed during bootstrap: %v", err) + return fmt.Errorf("xds: failed to JSON unmarshal server configuration during bootstrap: %v, config:\n%s", err, string(data)) } - sc.ServerURI = server.ServerURI - sc.ServerFeatures = server.ServerFeatures - for _, f := range server.ServerFeatures { - if f == serverFeaturesIgnoreResourceDeletion { - sc.IgnoreResourceDeletion = true - } - } + sc.serverURI = server.ServerURI + sc.channelCreds = server.ChannelCreds + sc.serverFeatures = server.ServerFeatures + for _, cc := range server.ChannelCreds { // We stop at the first credential type that we support. 
c := bootstrap.GetCredentials(cc.Type) @@ -183,351 +296,533 @@ func (sc *ServerConfig) UnmarshalJSON(data []byte) error { if err != nil { return fmt.Errorf("failed to build credentials bundle from bootstrap for %q: %v", cc.Type, err) } - sc.Creds = ChannelCreds(cc) + sc.selectedCreds = cc sc.credsDialOption = grpc.WithCredentialsBundle(bundle) - sc.Cleanups = append(sc.Cleanups, cancel) + sc.cleanups = append(sc.cleanups, cancel) break } + if sc.serverURI == "" { + return fmt.Errorf("xds: `server_uri` field in server config cannot be empty: %s", string(data)) + } + if sc.credsDialOption == nil { + return fmt.Errorf("xds: `channel_creds` field in server config cannot be empty: %s", string(data)) + } return nil } -// ServerConfigFromJSON creates a new ServerConfig from the given JSON -// configuration. This is the preferred way of creating a ServerConfig when -// hand-crafting the JSON configuration. -func ServerConfigFromJSON(data []byte) (*ServerConfig, error) { +// ServerConfigTestingOptions specifies options for creating a new ServerConfig +// for testing purposes. +// +// # Testing-Only +type ServerConfigTestingOptions struct { + // URI is the name of the server corresponding to this server config. + URI string + // ChannelCreds contains a list of channel credentials to use when talking + // to this server. If unspecified, `insecure` credentials will be used. + ChannelCreds []ChannelCreds + // ServerFeatures represents the list of features supported by this server. + ServerFeatures []string +} + +// ServerConfigForTesting creates a new ServerConfig from the passed in options, +// for testing purposes. +// +// # Testing-Only +func ServerConfigForTesting(opts ServerConfigTestingOptions) (*ServerConfig, error) { + cc := opts.ChannelCreds + if cc == nil { + cc = []ChannelCreds{{Type: "insecure"}} + } + scInternal := &serverConfigJSON{ + ServerURI: opts.URI, + ChannelCreds: cc, + ServerFeatures: opts.ServerFeatures, + } + scJSON, err := json.Marshal(scInternal) + if err != nil { + return nil, err + } + sc := new(ServerConfig) - if err := sc.UnmarshalJSON(data); err != nil { + if err := sc.UnmarshalJSON(scJSON); err != nil { return nil, err } return sc, nil } -// Equal reports whether sc and other are considered equal. -func (sc *ServerConfig) Equal(other *ServerConfig) bool { +// Config is the internal representation of the bootstrap configuration provided +// to the xDS client. +type Config struct { + xDSServers ServerConfigs + cpcs map[string]certproviderNameAndConfig + serverListenerResourceNameTemplate string + clientDefaultListenerResourceNameTemplate string + authorities map[string]*Authority + node node + + // A map from certprovider instance names to parsed buildable configs. + certProviderConfigs map[string]*certprovider.BuildableConfig +} + +// XDSServers returns the top-level list of management servers to connect to, +// ordered by priority. +func (c *Config) XDSServers() ServerConfigs { + return c.xDSServers +} + +// CertProviderConfigs returns a map from certificate provider plugin instance +// name to their configuration. Callers must not modify the returned map. +func (c *Config) CertProviderConfigs() map[string]*certprovider.BuildableConfig { + return c.certProviderConfigs +} + +// ServerListenerResourceNameTemplate returns template for the name of the +// Listener resource to subscribe to for a gRPC server. 
+// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with the IP +// and port on which the server is listening. (e.g., "0.0.0.0:8080", +// "[::]:8080"). For example, a value of "example/resource/%s" could become +// "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", +// the replaced string will be %-encoded. +// +// There is no default; if unset, xDS-based server creation fails. +func (c *Config) ServerListenerResourceNameTemplate() string { + return c.serverListenerResourceNameTemplate +} + +// ClientDefaultListenerResourceNameTemplate returns a template for the name of +// the Listener resource to subscribe to for a gRPC client channel. Used only +// when the channel is created with an "xds:" URI with no authority. +// +// If starts with "xdstp:", will be interpreted as a new-style name, +// in which case the authority of the URI will be used to select the +// relevant configuration in the "authorities" map. +// +// The token "%s", if present in this string, will be replaced with +// the service authority (i.e., the path part of the target URI +// used to create the gRPC channel). If the template starts with +// "xdstp:", the replaced string will be %-encoded. +// +// Defaults to "%s". +func (c *Config) ClientDefaultListenerResourceNameTemplate() string { + return c.clientDefaultListenerResourceNameTemplate +} + +// Authorities returns a map of authority name to corresponding configuration. +// Callers must not modify the returned map. +// +// This is used in the following cases: +// - A gRPC client channel is created using an "xds:" URI that includes +// an authority. +// - A gRPC client channel is created using an "xds:" URI with no +// authority, but the "client_default_listener_resource_name_template" +// field above turns it into an "xdstp:" URI. +// - A gRPC server is created and the +// "server_listener_resource_name_template" field is an "xdstp:" URI. +// +// In any of those cases, it is an error if the specified authority is +// not present in this map. +func (c *Config) Authorities() map[string]*Authority { + return c.authorities +} + +// Node returns xDS a v3 Node proto corresponding to the node field in the +// bootstrap configuration, which identifies a specific gRPC instance. +func (c *Config) Node() *v3corepb.Node { + return c.node.toProto() +} + +// Equal returns true if c equals other. 
+func (c *Config) Equal(other *Config) bool { switch { - case sc == nil && other == nil: + case c == nil && other == nil: return true - case (sc != nil) != (other != nil): + case (c != nil) != (other != nil): + return false + case !c.xDSServers.Equal(&other.xDSServers): + return false + case !maps.EqualFunc(c.certProviderConfigs, other.certProviderConfigs, func(a, b *certprovider.BuildableConfig) bool { return a.String() == b.String() }): + return false + case c.serverListenerResourceNameTemplate != other.serverListenerResourceNameTemplate: return false - case sc.ServerURI != other.ServerURI: + case c.clientDefaultListenerResourceNameTemplate != other.clientDefaultListenerResourceNameTemplate: return false - case !sc.Creds.Equal(other.Creds): + case !maps.EqualFunc(c.authorities, other.authorities, func(a, b *Authority) bool { return a.Equal(b) }): return false - case !equalStringSlice(sc.ServerFeatures, other.ServerFeatures): + case !c.node.Equal(other.node): return false } return true } -func equalStringSlice(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true +// String returns a string representation of the Config. +func (c *Config) String() string { + s, _ := c.MarshalJSON() + return string(s) } -// unmarshalJSONServerConfigSlice unmarshals JSON to a slice. -func unmarshalJSONServerConfigSlice(data []byte) ([]*ServerConfig, error) { - var servers []*ServerConfig - if err := json.Unmarshal(data, &servers); err != nil { - return nil, fmt.Errorf("failed to unmarshal JSON to []*ServerConfig: %v", err) - } - if len(servers) < 1 { - return nil, fmt.Errorf("no management server found in JSON") - } - return servers, nil +// The following fields correspond 1:1 with the JSON schema for Config. +type configJSON struct { + XDSServers ServerConfigs `json:"xds_servers,omitempty"` + CertificateProviders map[string]certproviderNameAndConfig `json:"certificate_providers,omitempty"` + ServerListenerResourceNameTemplate string `json:"server_listener_resource_name_template,omitempty"` + ClientDefaultListenerResourceNameTemplate string `json:"client_default_listener_resource_name_template,omitempty"` + Authorities map[string]*Authority `json:"authorities,omitempty"` + Node node `json:"node,omitempty"` } -// Authority contains configuration for an Authority for an xDS control plane -// server. See the Authorities field in the Config struct for how it's used. -type Authority struct { - // ClientListenerResourceNameTemplate is template for the name of the - // Listener resource to subscribe to for a gRPC client channel. Used only - // when the channel is created using an "xds:" URI with this authority name. - // - // The token "%s", if present in this string, will be replaced - // with %-encoded service authority (i.e., the path part of the target - // URI used to create the gRPC channel). - // - // Must start with "xdstp:///". If it does not, - // that is considered a bootstrap file parsing error. - // - // If not present in the bootstrap file, defaults to - // "xdstp:///envoy.config.listener.v3.Listener/%s". - ClientListenerResourceNameTemplate string - // XDSServer contains the management server and config to connect to for - // this authority. - XDSServer *ServerConfig +// MarshalJSON returns marshaled JSON bytes corresponding to this config. 
+func (c *Config) MarshalJSON() ([]byte, error) { + config := &configJSON{ + XDSServers: c.xDSServers, + CertificateProviders: c.cpcs, + ServerListenerResourceNameTemplate: c.serverListenerResourceNameTemplate, + ClientDefaultListenerResourceNameTemplate: c.clientDefaultListenerResourceNameTemplate, + Authorities: c.authorities, + Node: c.node, + } + return json.MarshalIndent(config, " ", " ") } -// UnmarshalJSON implement json unmarshaller. -func (a *Authority) UnmarshalJSON(data []byte) error { - var jsonData map[string]json.RawMessage - if err := json.Unmarshal(data, &jsonData); err != nil { - return fmt.Errorf("xds: failed to parse authority: %v", err) +// UnmarshalJSON takes the json data (the complete bootstrap configuration) and +// unmarshals it to the struct. +func (c *Config) UnmarshalJSON(data []byte) error { + // Initialize the node field with client controlled values. This ensures + // even if the bootstrap configuration did not contain the node field, we + // will have a node field with client controlled fields alone. + config := configJSON{Node: newNode()} + if err := json.Unmarshal(data, &config); err != nil { + return fmt.Errorf("xds: json.Unmarshal(%s) failed during bootstrap: %v", string(data), err) } - for k, v := range jsonData { - switch k { - case "xds_servers": - servers, err := unmarshalJSONServerConfigSlice(v) - if err != nil { - return fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) - } - a.XDSServer = servers[0] - case "client_listener_resource_name_template": - if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil { - return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } + c.xDSServers = config.XDSServers + c.cpcs = config.CertificateProviders + c.serverListenerResourceNameTemplate = config.ServerListenerResourceNameTemplate + c.clientDefaultListenerResourceNameTemplate = config.ClientDefaultListenerResourceNameTemplate + c.authorities = config.Authorities + c.node = config.Node + + // Build the certificate providers configuration to ensure that it is valid. + cpcCfgs := make(map[string]*certprovider.BuildableConfig) + getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) + for instance, nameAndConfig := range c.cpcs { + name := nameAndConfig.PluginName + parser := getBuilder(nameAndConfig.PluginName) + if parser == nil { + // We ignore plugins that we do not know about. + continue } + bc, err := parser.ParseConfig(nameAndConfig.Config) + if err != nil { + return fmt.Errorf("xds: config parsing for certificate provider plugin %q failed during bootstrap: %v", name, err) + } + cpcCfgs[instance] = bc } - return nil -} + c.certProviderConfigs = cpcCfgs -// Config provides the xDS client with several key bits of information that it -// requires in its interaction with the management server. The Config is -// initialized from the bootstrap file. -// -// Users must use one of the NewConfigXxx() functions to create a Config -// instance, and not initialize it manually. -type Config struct { - // XDSServer is the management server to connect to. - // - // The bootstrap file contains a list of servers (with name+creds), but we - // pick the first one. - XDSServer *ServerConfig - // CertProviderConfigs contains a mapping from certificate provider plugin - // instance names to parsed buildable configs. 
- CertProviderConfigs map[string]*certprovider.BuildableConfig - // ServerListenerResourceNameTemplate is a template for the name of the - // Listener resource to subscribe to for a gRPC server. - // - // If starts with "xdstp:", will be interpreted as a new-style name, - // in which case the authority of the URI will be used to select the - // relevant configuration in the "authorities" map. - // - // The token "%s", if present in this string, will be replaced with the IP - // and port on which the server is listening. (e.g., "0.0.0.0:8080", - // "[::]:8080"). For example, a value of "example/resource/%s" could become - // "example/resource/0.0.0.0:8080". If the template starts with "xdstp:", - // the replaced string will be %-encoded. - // - // There is no default; if unset, xDS-based server creation fails. - ServerListenerResourceNameTemplate string - // A template for the name of the Listener resource to subscribe to - // for a gRPC client channel. Used only when the channel is created - // with an "xds:" URI with no authority. - // - // If starts with "xdstp:", will be interpreted as a new-style name, - // in which case the authority of the URI will be used to select the - // relevant configuration in the "authorities" map. - // - // The token "%s", if present in this string, will be replaced with - // the service authority (i.e., the path part of the target URI - // used to create the gRPC channel). If the template starts with - // "xdstp:", the replaced string will be %-encoded. - // - // Defaults to "%s". - ClientDefaultListenerResourceNameTemplate string - // Authorities is a map of authority name to corresponding configuration. - // - // This is used in the following cases: - // - A gRPC client channel is created using an "xds:" URI that includes - // an authority. - // - A gRPC client channel is created using an "xds:" URI with no - // authority, but the "client_default_listener_resource_name_template" - // field above turns it into an "xdstp:" URI. - // - A gRPC server is created and the - // "server_listener_resource_name_template" field is an "xdstp:" URI. - // - // In any of those cases, it is an error if the specified authority is - // not present in this map. - Authorities map[string]*Authority - // NodeProto contains the Node proto to be used in xDS requests. This will be - // of type *v3corepb.Node. - NodeProto *v3corepb.Node -} - -type channelCreds struct { - Type string `json:"type"` - Config json.RawMessage `json:"config,omitempty"` -} + // Default value of the default client listener name template is "%s". 
+ if c.clientDefaultListenerResourceNameTemplate == "" { + c.clientDefaultListenerResourceNameTemplate = "%s" + } + if len(c.xDSServers) == 0 { + return fmt.Errorf("xds: required field `xds_servers` not found in bootstrap configuration: %s", string(data)) + } -type xdsServer struct { - ServerURI string `json:"server_uri"` - ChannelCreds []channelCreds `json:"channel_creds"` - ServerFeatures []string `json:"server_features"` + // Post-process the authorities' client listener resource template field: + // - if set, it must start with "xdstp:///" + // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" + for name, authority := range c.authorities { + prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name)) + if authority.ClientListenerResourceNameTemplate == "" { + authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" + continue + } + if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) { + return fmt.Errorf("xds: field clientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix) + } + } + return nil } -func bootstrapConfigFromEnvVariable() ([]byte, error) { +// GetConfiguration returns the bootstrap configuration initialized by reading +// the bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents +// specified at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the +// former is preferred. +// +// If none of the env vars are set, this function returns the fallback +// configuration if it is not nil. Else, it returns an error. +// +// This function tries to process as much of the bootstrap file as possible (in +// the presence of the errors) and may return a Config object with certain +// fields left unspecified, in which case the caller should use some sane +// defaults. +func GetConfiguration() (*Config, error) { fName := envconfig.XDSBootstrapFileName fContent := envconfig.XDSBootstrapFileContent - // Bootstrap file name has higher priority than bootstrap content. if fName != "" { - // If file name is set - // - If file not found (or other errors), fail - // - Otherwise, use the content. - // - // Note that even if the content is invalid, we don't failover to the - // file content env variable. 
- logger.Debugf("Using bootstrap file with name %q", fName) - return bootstrapFileReadFunc(fName) + if logger.V(2) { + logger.Infof("Using bootstrap file with name %q from GRPC_XDS_BOOTSTRAP environment variable", fName) + } + cfg, err := bootstrapFileReadFunc(fName) + if err != nil { + return nil, fmt.Errorf("xds: failed to read bootstrap config from file %q: %v", fName, err) + } + return newConfigFromContents(cfg) } if fContent != "" { - return []byte(fContent), nil + if logger.V(2) { + logger.Infof("Using bootstrap contents from GRPC_XDS_BOOTSTRAP_CONFIG environment variable") + } + return newConfigFromContents([]byte(fContent)) + } + + if cfg := fallbackBootstrapConfig(); cfg != nil { + if logger.V(2) { + logger.Infof("Using bootstrap contents from fallback config") + } + return cfg, nil } - return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", - envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) + return nil, fmt.Errorf("bootstrap environment variables (%q or %q) not defined, and no fallback config set", envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv) +} + +func newConfigFromContents(data []byte) (*Config, error) { + // Normalize the input configuration. + buf := bytes.Buffer{} + err := json.Indent(&buf, data, "", "") + if err != nil { + return nil, fmt.Errorf("xds: error normalizing JSON bootstrap configuration: %v", err) + } + data = bytes.TrimSpace(buf.Bytes()) + + config := &Config{} + if err := config.UnmarshalJSON(data); err != nil { + return nil, err + } + return config, nil } -// NewConfig returns a new instance of Config initialized by reading the -// bootstrap file found at ${GRPC_XDS_BOOTSTRAP} or bootstrap contents specified -// at ${GRPC_XDS_BOOTSTRAP_CONFIG}. If both env vars are set, the former is -// preferred. +// ConfigOptionsForTesting specifies options for creating a new bootstrap +// configuration for testing purposes. // -// We support a credential registration mechanism and only credentials -// registered through that mechanism will be accepted here. See package -// `xds/bootstrap` for details. +// # Testing-Only +type ConfigOptionsForTesting struct { + // Servers is the top-level xDS server configuration. It contains a list of + // server configurations. + Servers json.RawMessage + // CertificateProviders is the certificate providers configuration. + CertificateProviders map[string]json.RawMessage + // ServerListenerResourceNameTemplate is the listener resource name template + // to be used on the gRPC server. + ServerListenerResourceNameTemplate string + // ClientDefaultListenerResourceNameTemplate is the default listener + // resource name template to be used on the gRPC client. + ClientDefaultListenerResourceNameTemplate string + // Authorities is a list of non-default authorities. + Authorities map[string]json.RawMessage + // Node identifies the gRPC client/server node in the + // proxyless service mesh. + Node json.RawMessage +} + +// NewContentsForTesting creates a new bootstrap configuration from the passed in +// options, for testing purposes. // -// This function tries to process as much of the bootstrap file as possible (in -// the presence of the errors) and may return a Config object with certain -// fields left unspecified, in which case the caller should use some sane -// defaults. 
-func NewConfig() (*Config, error) { - // Examples of the bootstrap json can be found in the generator tests - // https://github.com/GoogleCloudPlatform/traffic-director-grpc-bootstrap/blob/master/main_test.go. - data, err := bootstrapConfigFromEnvVariable() +// # Testing-Only +func NewContentsForTesting(opts ConfigOptionsForTesting) ([]byte, error) { + var servers ServerConfigs + if err := json.Unmarshal(opts.Servers, &servers); err != nil { + return nil, err + } + certProviders := make(map[string]certproviderNameAndConfig) + for k, v := range opts.CertificateProviders { + cp := certproviderNameAndConfig{} + if err := json.Unmarshal(v, &cp); err != nil { + return nil, fmt.Errorf("failed to unmarshal certificate provider configuration for %s: %s", k, string(v)) + } + certProviders[k] = cp + } + authorities := make(map[string]*Authority) + for k, v := range opts.Authorities { + a := &Authority{} + if err := json.Unmarshal(v, a); err != nil { + return nil, fmt.Errorf("failed to unmarshal authority configuration for %s: %s", k, string(v)) + } + authorities[k] = a + } + node := newNode() + if err := json.Unmarshal(opts.Node, &node); err != nil { + return nil, fmt.Errorf("failed to unmarshal node configuration %s: %v", string(opts.Node), err) + } + cfgJSON := configJSON{ + XDSServers: servers, + CertificateProviders: certProviders, + ServerListenerResourceNameTemplate: opts.ServerListenerResourceNameTemplate, + ClientDefaultListenerResourceNameTemplate: opts.ClientDefaultListenerResourceNameTemplate, + Authorities: authorities, + Node: node, + } + contents, err := json.MarshalIndent(cfgJSON, " ", " ") if err != nil { - return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err) + return nil, fmt.Errorf("failed to marshal bootstrap configuration for provided options %+v: %v", opts, err) } - return newConfigFromContents(data) + return contents, nil } -// NewConfigFromContents returns a new Config using the specified -// bootstrap file contents instead of reading the environment variable. -func NewConfigFromContents(data []byte) (*Config, error) { - return newConfigFromContents(data) +// NewConfigForTesting creates a new bootstrap configuration from the provided +// contents, for testing purposes. +// +// # Testing-Only +func NewConfigForTesting(contents []byte) (*Config, error) { + return newConfigFromContents(contents) } -func newConfigFromContents(data []byte) (*Config, error) { - config := &Config{} +// certproviderNameAndConfig is the internal representation of +// the`certificate_providers` field in the bootstrap configuration. +type certproviderNameAndConfig struct { + PluginName string `json:"plugin_name"` + Config json.RawMessage `json:"config"` +} - var jsonData map[string]json.RawMessage - if err := json.Unmarshal(data, &jsonData); err != nil { - return nil, fmt.Errorf("xds: failed to parse bootstrap config: %v", err) - } +// locality is the internal representation of the locality field within node. 
+type locality struct { + Region string `json:"region,omitempty"` + Zone string `json:"zone,omitempty"` + SubZone string `json:"sub_zone,omitempty"` +} - var node *v3corepb.Node - opts := protojson.UnmarshalOptions{DiscardUnknown: true} - for k, v := range jsonData { - switch k { - case "node": - node = &v3corepb.Node{} - if err := opts.Unmarshal(v, node); err != nil { - return nil, fmt.Errorf("xds: protojson.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - case "xds_servers": - servers, err := unmarshalJSONServerConfigSlice(v) - if err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(data) for field %q failed during bootstrap: %v", k, err) - } - config.XDSServer = servers[0] - case "certificate_providers": - var providerInstances map[string]json.RawMessage - if err := json.Unmarshal(v, &providerInstances); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - configs := make(map[string]*certprovider.BuildableConfig) - getBuilder := internal.GetCertificateProviderBuilder.(func(string) certprovider.Builder) - for instance, data := range providerInstances { - var nameAndConfig struct { - PluginName string `json:"plugin_name"` - Config json.RawMessage `json:"config"` - } - if err := json.Unmarshal(data, &nameAndConfig); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), instance, err) - } - - name := nameAndConfig.PluginName - parser := getBuilder(nameAndConfig.PluginName) - if parser == nil { - // We ignore plugins that we do not know about. - continue - } - bc, err := parser.ParseConfig(nameAndConfig.Config) - if err != nil { - return nil, fmt.Errorf("xds: config parsing for plugin %q failed: %v", name, err) - } - configs[instance] = bc - } - config.CertProviderConfigs = configs - case "server_listener_resource_name_template": - if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - case "client_default_listener_resource_name_template": - if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - case "authorities": - if err := json.Unmarshal(v, &config.Authorities); err != nil { - return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err) - } - default: - logger.Warningf("Bootstrap content has unknown field: %s", k) - } - // Do not fail the xDS bootstrap when an unknown field is seen. This can - // happen when an older version client reads a newer version bootstrap - // file with new fields. - } +func (l locality) Equal(other locality) bool { + return l.Region == other.Region && l.Zone == other.Zone && l.SubZone == other.SubZone +} + +func (l locality) isEmpty() bool { + return l.Equal(locality{}) +} - if config.ClientDefaultListenerResourceNameTemplate == "" { - // Default value of the default client listener name template is "%s". - config.ClientDefaultListenerResourceNameTemplate = "%s" +type userAgentVersion struct { + UserAgentVersion string `json:"user_agent_version,omitempty"` +} + +// node is the internal representation of the node field in the bootstrap +// configuration. 
+type node struct { + ID string `json:"id,omitempty"` + Cluster string `json:"cluster,omitempty"` + Locality locality `json:"locality,omitempty"` + Metadata *structpb.Struct `json:"metadata,omitempty"` + + // The following fields are controlled by the client implementation and + // should not unmarshaled from JSON. + userAgentName string + userAgentVersionType userAgentVersion + clientFeatures []string +} + +// newNode is a convenience function to create a new node instance with fields +// controlled by the client implementation set to the desired values. +func newNode() node { + return node{ + userAgentName: gRPCUserAgentName, + userAgentVersionType: userAgentVersion{UserAgentVersion: grpc.Version}, + clientFeatures: []string{clientFeatureNoOverprovisioning, clientFeatureResourceWrapper}, } - if config.XDSServer == nil { - return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"]) +} + +func (n node) Equal(other node) bool { + switch { + case n.ID != other.ID: + return false + case n.Cluster != other.Cluster: + return false + case !n.Locality.Equal(other.Locality): + return false + case n.userAgentName != other.userAgentName: + return false + case n.userAgentVersionType != other.userAgentVersionType: + return false } - if config.XDSServer.ServerURI == "" { - return nil, fmt.Errorf("xds: required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"]) + + // Consider failures in JSON marshaling as being unable to perform the + // comparison, and hence return false. + nMetadata, err := n.Metadata.MarshalJSON() + if err != nil { + return false } - if config.XDSServer.CredsDialOption() == nil { - return nil, fmt.Errorf("xds: required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"]) + otherMetadata, err := other.Metadata.MarshalJSON() + if err != nil { + return false } - // Post-process the authorities' client listener resource template field: - // - if set, it must start with "xdstp:///" - // - if not set, it defaults to "xdstp:///envoy.config.listener.v3.Listener/%s" - for name, authority := range config.Authorities { - prefix := fmt.Sprintf("xdstp://%s", url.PathEscape(name)) - if authority.ClientListenerResourceNameTemplate == "" { - authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s" - continue - } - if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) { - return nil, fmt.Errorf("xds: field ClientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix) - } + if !bytes.Equal(nMetadata, otherMetadata) { + return false } - // Performing post-production on the node information. Some additional fields - // which are not expected to be set in the bootstrap file are populated here. 
- if node == nil { - node = &v3corepb.Node{} + return slices.Equal(n.clientFeatures, other.clientFeatures) +} + +func (n node) toProto() *v3corepb.Node { + return &v3corepb.Node{ + Id: n.ID, + Cluster: n.Cluster, + Locality: func() *v3corepb.Locality { + if n.Locality.isEmpty() { + return nil + } + return &v3corepb.Locality{ + Region: n.Locality.Region, + Zone: n.Locality.Zone, + SubZone: n.Locality.SubZone, + } + }(), + Metadata: proto.Clone(n.Metadata).(*structpb.Struct), + UserAgentName: n.userAgentName, + UserAgentVersionType: &v3corepb.Node_UserAgentVersion{UserAgentVersion: n.userAgentVersionType.UserAgentVersion}, + ClientFeatures: slices.Clone(n.clientFeatures), } - node.UserAgentName = gRPCUserAgentName - node.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version} - node.ClientFeatures = append(node.ClientFeatures, clientFeatureNoOverprovisioning, clientFeatureResourceWrapper) - config.NodeProto = node +} - if logger.V(2) { - logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config)) +// SetFallbackBootstrapConfig sets the fallback bootstrap configuration to be +// used when the bootstrap environment variables are unset. +// +// The provided configuration must be valid JSON. Returns a non-nil error if +// parsing the provided configuration fails. +func SetFallbackBootstrapConfig(cfgJSON []byte) error { + config, err := newConfigFromContents(cfgJSON) + if err != nil { + return err } - return config, nil + + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = config + return nil +} + +// UnsetFallbackBootstrapConfigForTesting unsets the fallback bootstrap +// configuration to be used when the bootstrap environment variables are unset. +// +// # Testing-Only +func UnsetFallbackBootstrapConfigForTesting() { + configMu.Lock() + defer configMu.Unlock() + fallbackBootstrapCfg = nil } + +// fallbackBootstrapConfig returns the fallback bootstrap configuration +// that will be used by the xDS client when the bootstrap environment +// variables are unset. +func fallbackBootstrapConfig() *Config { + configMu.Lock() + defer configMu.Unlock() + return fallbackBootstrapCfg +} + +var ( + configMu sync.Mutex + fallbackBootstrapCfg *Config +) diff --git a/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go index 9b51fcc839721..ec1a30919ec9a 100644 --- a/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go +++ b/vendor/google.golang.org/grpc/internal/xds/bootstrap/template.go @@ -34,7 +34,7 @@ func PopulateResourceTemplate(template, target string) string { if strings.HasPrefix(template, "xdstp:") { target = percentEncode(target) } - return strings.Replace(template, "%s", target, -1) + return strings.ReplaceAll(template, "%s", target) } // percentEncode percent encode t, except for "/". See the tests for examples. 
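The bootstrap rework above replaces the hand-rolled field switch with JSON-tagged structs (serverConfigJSON, configJSON) and routes all parsing through Config.UnmarshalJSON. The sketch below is not part of the patch; it only illustrates the shape of a bootstrap document those tags accept and how it reaches GetConfiguration through the environment. The server URI, node ID and cluster name are placeholder values.

package main

import "os"

// bootstrapJSON mirrors the json tags declared above in configJSON and
// serverConfigJSON (xds_servers, server_uri, channel_creds, node, ...).
// All values are illustrative placeholders.
const bootstrapJSON = `{
  "xds_servers": [
    {
      "server_uri": "xds-server.example.com:443",
      "channel_creds": [{"type": "insecure"}]
    }
  ],
  "node": {"id": "example-node", "cluster": "example-cluster"},
  "client_default_listener_resource_name_template": "%s"
}`

func main() {
	// GetConfiguration prefers the file named by GRPC_XDS_BOOTSTRAP; the raw
	// contents in GRPC_XDS_BOOTSTRAP_CONFIG are read only when the file
	// variable is unset.
	if err := os.Setenv("GRPC_XDS_BOOTSTRAP_CONFIG", bootstrapJSON); err != nil {
		panic(err)
	}
}

With such a document in place, PopulateResourceTemplate (template.go hunk above) expands the %s token in the listener name templates with the target's service authority, percent-encoding it when the template is an xdstp name.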
diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go index 713e39cf31cb9..fb599954a6c18 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/converter.go @@ -59,11 +59,11 @@ func buildLogger(loggerConfig *v3rbacpb.RBAC_AuditLoggingOptions_AuditLoggerConf } func getCustomConfig(config *anypb.Any) (json.RawMessage, string, error) { - any, err := config.UnmarshalNew() + c, err := config.UnmarshalNew() if err != nil { return nil, "", err } - switch m := any.(type) { + switch m := c.(type) { case *v1xdsudpatypepb.TypedStruct: return convertCustomConfig(m.TypeUrl, m.Value) case *v3xdsxdstypepb.TypedStruct: diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go index c9f71d32cbb28..e1c15018bde08 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/matchers.go @@ -244,7 +244,7 @@ func (am *andMatcher) match(data *rpcData) bool { type alwaysMatcher struct { } -func (am *alwaysMatcher) match(data *rpcData) bool { +func (am *alwaysMatcher) match(*rpcData) bool { return true } diff --git a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go index 33011726a6f60..344052cb04fd1 100644 --- a/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go +++ b/vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go @@ -237,12 +237,9 @@ func newRPCData(ctx context.Context) (*rpcData, error) { var authType string var peerCertificates []*x509.Certificate - if pi.AuthInfo != nil { - tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo) - if ok { - authType = pi.AuthInfo.AuthType() - peerCertificates = tlsInfo.State.PeerCertificates - } + if tlsInfo, ok := pi.AuthInfo.(credentials.TLSInfo); ok { + authType = pi.AuthInfo.AuthType() + peerCertificates = tlsInfo.State.PeerCertificates } return &rpcData{ @@ -281,11 +278,12 @@ func (e *engine) doAuditLogging(rpcData *rpcData, rule string, authorized bool) // In the RBAC world, we need to have a SPIFFE ID as the principal for this // to be meaningful principal := "" - if rpcData.peerInfo != nil && rpcData.peerInfo.AuthInfo != nil && rpcData.peerInfo.AuthInfo.AuthType() == "tls" { + if rpcData.peerInfo != nil { // If AuthType = tls, then we can cast AuthInfo to TLSInfo. - tlsInfo := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo) - if tlsInfo.SPIFFEID != nil { - principal = tlsInfo.SPIFFEID.String() + if tlsInfo, ok := rpcData.peerInfo.AuthInfo.(credentials.TLSInfo); ok { + if tlsInfo.SPIFFEID != nil { + principal = tlsInfo.SPIFFEID.String() + } } } diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go index 34d31b5e7d311..eb42b19fb99a1 100644 --- a/vendor/google.golang.org/grpc/keepalive/keepalive.go +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -34,15 +34,29 @@ type ClientParameters struct { // After a duration of this time if the client doesn't see any activity it // pings the server to see if the transport is still alive. // If set below 10s, a minimum value of 10s will be used instead. - Time time.Duration // The current default value is infinity. 
+ // + // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5 + // minutes (which means the client shouldn't ping more frequently than every + // 5 minutes). + // + // Though not ideal, it's not a strong requirement for Time to be less than + // EnforcementPolicy.MinTime. Time will automatically double if the server + // disconnects due to its enforcement policy. + // + // For more details, see + // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md + Time time.Duration // After having pinged for keepalive check, the client waits for a duration // of Timeout and if no activity is seen even after that the connection is // closed. - Timeout time.Duration // The current default value is 20 seconds. + // + // If keepalive is enabled, and this value is not explicitly set, the default + // is 20 seconds. + Timeout time.Duration // If true, client sends keepalive pings even with no active RPCs. If false, // when there are no active RPCs, Time and Timeout will be ignored and no // keepalive pings will be sent. - PermitWithoutStream bool // false by default. + PermitWithoutStream bool } // ServerParameters is used to set keepalive and max-age parameters on the diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go new file mode 100644 index 0000000000000..c37c58c0233ec --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go @@ -0,0 +1,194 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "sort" + "sync" + + "google.golang.org/grpc/internal" +) + +// BufferPool is a pool of buffers that can be shared and reused, resulting in +// decreased memory allocation. +type BufferPool interface { + // Get returns a buffer with specified length from the pool. + Get(length int) *[]byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +var defaultBufferPoolSizes = []int{ + 256, + 4 << 10, // 4KB (go page size) + 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC) + 32 << 10, // 32KB (default buffer size for io.Copy) + 1 << 20, // 1MB +} + +var defaultBufferPool BufferPool + +func init() { + defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...) + + internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) { + defaultBufferPool = pool + } + + internal.SetBufferPoolingThresholdForTesting = func(threshold int) { + bufferPoolingThreshold = threshold + } +} + +// DefaultBufferPool returns the current default buffer pool. It is a BufferPool +// created with NewBufferPool that uses a set of default sizes optimized for +// expected workflows. +func DefaultBufferPool() BufferPool { + return defaultBufferPool +} + +// NewTieredBufferPool returns a BufferPool implementation that uses multiple +// underlying pools of the given pool sizes. 
+func NewTieredBufferPool(poolSizes ...int) BufferPool { + sort.Ints(poolSizes) + pools := make([]*sizedBufferPool, len(poolSizes)) + for i, s := range poolSizes { + pools[i] = newSizedBufferPool(s) + } + return &tieredBufferPool{ + sizedPools: pools, + } +} + +// tieredBufferPool implements the BufferPool interface with multiple tiers of +// buffer pools for different sizes of buffers. +type tieredBufferPool struct { + sizedPools []*sizedBufferPool + fallbackPool simpleBufferPool +} + +func (p *tieredBufferPool) Get(size int) *[]byte { + return p.getPool(size).Get(size) +} + +func (p *tieredBufferPool) Put(buf *[]byte) { + p.getPool(cap(*buf)).Put(buf) +} + +func (p *tieredBufferPool) getPool(size int) BufferPool { + poolIdx := sort.Search(len(p.sizedPools), func(i int) bool { + return p.sizedPools[i].defaultSize >= size + }) + + if poolIdx == len(p.sizedPools) { + return &p.fallbackPool + } + + return p.sizedPools[poolIdx] +} + +// sizedBufferPool is a BufferPool implementation that is optimized for specific +// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size +// of 16kb and a sizedBufferPool can be configured to only return buffers with a +// capacity of 16kb. Note that however it does not support returning larger +// buffers and in fact panics if such a buffer is requested. Because of this, +// this BufferPool implementation is not meant to be used on its own and rather +// is intended to be embedded in a tieredBufferPool such that Get is only +// invoked when the required size is smaller than or equal to defaultSize. +type sizedBufferPool struct { + pool sync.Pool + defaultSize int +} + +func (p *sizedBufferPool) Get(size int) *[]byte { + buf := p.pool.Get().(*[]byte) + b := *buf + clear(b[:cap(b)]) + *buf = b[:size] + return buf +} + +func (p *sizedBufferPool) Put(buf *[]byte) { + if cap(*buf) < p.defaultSize { + // Ignore buffers that are too small to fit in the pool. Otherwise, when + // Get is called it will panic as it tries to index outside the bounds + // of the buffer. + return + } + p.pool.Put(buf) +} + +func newSizedBufferPool(size int) *sizedBufferPool { + return &sizedBufferPool{ + pool: sync.Pool{ + New: func() any { + buf := make([]byte, size) + return &buf + }, + }, + defaultSize: size, + } +} + +var _ BufferPool = (*simpleBufferPool)(nil) + +// simpleBufferPool is an implementation of the BufferPool interface that +// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to +// acquire a buffer from the pool but if that buffer is too small, it returns it +// to the pool and creates a new one. +type simpleBufferPool struct { + pool sync.Pool +} + +func (p *simpleBufferPool) Get(size int) *[]byte { + bs, ok := p.pool.Get().(*[]byte) + if ok && cap(*bs) >= size { + *bs = (*bs)[:size] + return bs + } + + // A buffer was pulled from the pool, but it is too small. Put it back in + // the pool and create one large enough. + if ok { + p.pool.Put(bs) + } + + b := make([]byte, size) + return &b +} + +func (p *simpleBufferPool) Put(buf *[]byte) { + p.pool.Put(buf) +} + +var _ BufferPool = NopBufferPool{} + +// NopBufferPool is a buffer pool that returns new buffers without pooling. +type NopBufferPool struct{} + +// Get returns a buffer with specified length from the pool. +func (NopBufferPool) Get(length int) *[]byte { + b := make([]byte, length) + return &b +} + +// Put returns a buffer to the pool. 
+func (NopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go new file mode 100644 index 0000000000000..228e9c2f20f26 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go @@ -0,0 +1,226 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package mem + +import ( + "io" +) + +// BufferSlice offers a means to represent data that spans one or more Buffer +// instances. A BufferSlice is meant to be immutable after creation, and methods +// like Ref create and return copies of the slice. This is why all methods have +// value receivers rather than pointer receivers. +// +// Note that any of the methods that read the underlying buffers such as Ref, +// Len or CopyTo etc., will panic if any underlying buffers have already been +// freed. It is recommended to not directly interact with any of the underlying +// buffers directly, rather such interactions should be mediated through the +// various methods on this type. +// +// By convention, any APIs that return (mem.BufferSlice, error) should reduce +// the burden on the caller by never returning a mem.BufferSlice that needs to +// be freed if the error is non-nil, unless explicitly stated. +type BufferSlice []Buffer + +// Len returns the sum of the length of all the Buffers in this slice. +// +// # Warning +// +// Invoking the built-in len on a BufferSlice will return the number of buffers +// in the slice, and *not* the value returned by this function. +func (s BufferSlice) Len() int { + var length int + for _, b := range s { + length += b.Len() + } + return length +} + +// Ref invokes Ref on each buffer in the slice. +func (s BufferSlice) Ref() { + for _, b := range s { + b.Ref() + } +} + +// Free invokes Buffer.Free() on each Buffer in the slice. +func (s BufferSlice) Free() { + for _, b := range s { + b.Free() + } +} + +// CopyTo copies each of the underlying Buffer's data into the given buffer, +// returning the number of bytes copied. Has the same semantics as the copy +// builtin in that it will copy as many bytes as it can, stopping when either dst +// is full or s runs out of data, returning the minimum of s.Len() and len(dst). +func (s BufferSlice) CopyTo(dst []byte) int { + off := 0 + for _, b := range s { + off += copy(dst[off:], b.ReadOnlyData()) + } + return off +} + +// Materialize concatenates all the underlying Buffer's data into a single +// contiguous buffer using CopyTo. +func (s BufferSlice) Materialize() []byte { + l := s.Len() + if l == 0 { + return nil + } + out := make([]byte, l) + s.CopyTo(out) + return out +} + +// MaterializeToBuffer functions like Materialize except that it writes the data +// to a single Buffer pulled from the given BufferPool. +// +// As a special case, if the input BufferSlice only actually has one Buffer, this +// function simply increases the refcount before returning said Buffer. 
Freeing this +// buffer won't release it until the BufferSlice is itself released. +func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer { + if len(s) == 1 { + s[0].Ref() + return s[0] + } + sLen := s.Len() + if sLen == 0 { + return emptyBuffer{} + } + buf := pool.Get(sLen) + s.CopyTo(*buf) + return NewBuffer(buf, pool) +} + +// Reader returns a new Reader for the input slice after taking references to +// each underlying buffer. +func (s BufferSlice) Reader() Reader { + s.Ref() + return &sliceReader{ + data: s, + len: s.Len(), + } +} + +// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface +// with other parts systems. It also provides an additional convenience method +// Remaining(), which returns the number of unread bytes remaining in the slice. +// Buffers will be freed as they are read. +type Reader interface { + io.Reader + io.ByteReader + // Close frees the underlying BufferSlice and never returns an error. Subsequent + // calls to Read will return (0, io.EOF). + Close() error + // Remaining returns the number of unread bytes remaining in the slice. + Remaining() int +} + +type sliceReader struct { + data BufferSlice + len int + // The index into data[0].ReadOnlyData(). + bufferIdx int +} + +func (r *sliceReader) Remaining() int { + return r.len +} + +func (r *sliceReader) Close() error { + r.data.Free() + r.data = nil + r.len = 0 + return nil +} + +func (r *sliceReader) freeFirstBufferIfEmpty() bool { + if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) { + return false + } + + r.data[0].Free() + r.data = r.data[1:] + r.bufferIdx = 0 + return true +} + +func (r *sliceReader) Read(buf []byte) (n int, _ error) { + if r.len == 0 { + return 0, io.EOF + } + + for len(buf) != 0 && r.len != 0 { + // Copy as much as possible from the first Buffer in the slice into the + // given byte slice. + data := r.data[0].ReadOnlyData() + copied := copy(buf, data[r.bufferIdx:]) + r.len -= copied // Reduce len by the number of bytes copied. + r.bufferIdx += copied // Increment the buffer index. + n += copied // Increment the total number of bytes read. + buf = buf[copied:] // Shrink the given byte slice. + + // If we have copied all the data from the first Buffer, free it and advance to + // the next in the slice. + r.freeFirstBufferIfEmpty() + } + + return n, nil +} + +func (r *sliceReader) ReadByte() (byte, error) { + if r.len == 0 { + return 0, io.EOF + } + + // There may be any number of empty buffers in the slice, clear them all until a + // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0. + for r.freeFirstBufferIfEmpty() { + } + + b := r.data[0].ReadOnlyData()[r.bufferIdx] + r.len-- + r.bufferIdx++ + // Free the first buffer in the slice if the last byte was read + r.freeFirstBufferIfEmpty() + return b, nil +} + +var _ io.Writer = (*writer)(nil) + +type writer struct { + buffers *BufferSlice + pool BufferPool +} + +func (w *writer) Write(p []byte) (n int, err error) { + b := Copy(p, w.pool) + *w.buffers = append(*w.buffers, b) + return b.Len(), nil +} + +// NewWriter wraps the given BufferSlice and BufferPool to implement the +// io.Writer interface. Every call to Write copies the contents of the given +// buffer into a new Buffer pulled from the given pool and the Buffer is added to +// the given BufferSlice. 
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer { + return &writer{buffers: buffers, pool: pool} +} diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go new file mode 100644 index 0000000000000..4d66b2ccc2be6 --- /dev/null +++ b/vendor/google.golang.org/grpc/mem/buffers.go @@ -0,0 +1,252 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package mem provides utilities that facilitate memory reuse in byte slices +// that are used as buffers. +// +// # Experimental +// +// Notice: All APIs in this package are EXPERIMENTAL and may be changed or +// removed in a later release. +package mem + +import ( + "fmt" + "sync" + "sync/atomic" +) + +// A Buffer represents a reference counted piece of data (in bytes) that can be +// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be +// released by calling Free(), which invokes the free function given at creation +// only after all references are released. +// +// Note that a Buffer is not safe for concurrent access and instead each +// goroutine should use its own reference to the data, which can be acquired via +// a call to Ref(). +// +// Attempts to access the underlying data after releasing the reference to the +// Buffer will panic. +type Buffer interface { + // ReadOnlyData returns the underlying byte slice. Note that it is undefined + // behavior to modify the contents of this slice in any way. + ReadOnlyData() []byte + // Ref increases the reference counter for this Buffer. + Ref() + // Free decrements this Buffer's reference counter and frees the underlying + // byte slice if the counter reaches 0 as a result of this call. + Free() + // Len returns the Buffer's size. + Len() int + + split(n int) (left, right Buffer) + read(buf []byte) (int, Buffer) +} + +var ( + bufferPoolingThreshold = 1 << 10 + + bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }} + refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }} +) + +func IsBelowBufferPoolingThreshold(size int) bool { + return size <= bufferPoolingThreshold +} + +type buffer struct { + origData *[]byte + data []byte + refs *atomic.Int32 + pool BufferPool +} + +func newBuffer() *buffer { + return bufferObjectPool.Get().(*buffer) +} + +// NewBuffer creates a new Buffer from the given data, initializing the reference +// counter to 1. The data will then be returned to the given pool when all +// references to the returned Buffer are released. As a special case to avoid +// additional allocations, if the given buffer pool is nil, the returned buffer +// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the +// underlying data is never freed. +// +// Note that the backing array of the given data is not copied. 
+func NewBuffer(data *[]byte, pool BufferPool) Buffer { + if pool == nil || IsBelowBufferPoolingThreshold(len(*data)) { + return (SliceBuffer)(*data) + } + b := newBuffer() + b.origData = data + b.data = *data + b.pool = pool + b.refs = refObjectPool.Get().(*atomic.Int32) + b.refs.Add(1) + return b +} + +// Copy creates a new Buffer from the given data, initializing the reference +// counter to 1. +// +// It acquires a []byte from the given pool and copies over the backing array +// of the given data. The []byte acquired from the pool is returned to the +// pool when all references to the returned Buffer are released. +func Copy(data []byte, pool BufferPool) Buffer { + if IsBelowBufferPoolingThreshold(len(data)) { + buf := make(SliceBuffer, len(data)) + copy(buf, data) + return buf + } + + buf := pool.Get(len(data)) + copy(*buf, data) + return NewBuffer(buf, pool) +} + +func (b *buffer) ReadOnlyData() []byte { + if b.refs == nil { + panic("Cannot read freed buffer") + } + return b.data +} + +func (b *buffer) Ref() { + if b.refs == nil { + panic("Cannot ref freed buffer") + } + b.refs.Add(1) +} + +func (b *buffer) Free() { + if b.refs == nil { + panic("Cannot free freed buffer") + } + + refs := b.refs.Add(-1) + switch { + case refs > 0: + return + case refs == 0: + if b.pool != nil { + b.pool.Put(b.origData) + } + + refObjectPool.Put(b.refs) + b.origData = nil + b.data = nil + b.refs = nil + b.pool = nil + bufferObjectPool.Put(b) + default: + panic("Cannot free freed buffer") + } +} + +func (b *buffer) Len() int { + return len(b.ReadOnlyData()) +} + +func (b *buffer) split(n int) (Buffer, Buffer) { + if b.refs == nil { + panic("Cannot split freed buffer") + } + + b.refs.Add(1) + split := newBuffer() + split.origData = b.origData + split.data = b.data[n:] + split.refs = b.refs + split.pool = b.pool + + b.data = b.data[:n] + + return b, split +} + +func (b *buffer) read(buf []byte) (int, Buffer) { + if b.refs == nil { + panic("Cannot read freed buffer") + } + + n := copy(buf, b.data) + if n == len(b.data) { + b.Free() + return n, nil + } + + b.data = b.data[n:] + return n, b +} + +// String returns a string representation of the buffer. May be used for +// debugging purposes. +func (b *buffer) String() string { + return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData())) +} + +func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) { + return buf.read(dst) +} + +// SplitUnsafe modifies the receiver to point to the first n bytes while it +// returns a new reference to the remaining bytes. The returned Buffer functions +// just like a normal reference acquired using Ref(). 
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) { + return buf.split(n) +} + +type emptyBuffer struct{} + +func (e emptyBuffer) ReadOnlyData() []byte { + return nil +} + +func (e emptyBuffer) Ref() {} +func (e emptyBuffer) Free() {} + +func (e emptyBuffer) Len() int { + return 0 +} + +func (e emptyBuffer) split(int) (left, right Buffer) { + return e, e +} + +func (e emptyBuffer) read([]byte) (int, Buffer) { + return 0, e +} + +type SliceBuffer []byte + +func (s SliceBuffer) ReadOnlyData() []byte { return s } +func (s SliceBuffer) Ref() {} +func (s SliceBuffer) Free() {} +func (s SliceBuffer) Len() int { return len(s) } + +func (s SliceBuffer) split(n int) (left, right Buffer) { + return s[:n], s[n:] +} + +func (s SliceBuffer) read(buf []byte) (int, Buffer) { + n := copy(buf, s) + if n == len(s) { + return n, nil + } + return n, s[n:] +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 1e9485fd6e268..d2e15253bbfbc 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Keys are matched in a case insensitive // manner. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func ValueFromIncomingContext(ctx context.Context, key string) []string { md, ok := ctx.Value(mdIncomingKey{}).(MD) if !ok { @@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // Case insenitive comparison: MD is a map, and there's no guarantee + // Case insensitive comparison: MD is a map, and there's no guarantee // that the MD attached to the context is created using our helper // functions. if strings.EqualFold(k, key) { diff --git a/vendor/google.golang.org/grpc/orca/call_metrics.go b/vendor/google.golang.org/grpc/orca/call_metrics.go index 157dad49c6571..9ae7721420316 100644 --- a/vendor/google.golang.org/grpc/orca/call_metrics.go +++ b/vendor/google.golang.org/grpc/orca/call_metrics.go @@ -156,7 +156,7 @@ func unaryInt(smp ServerMetricsProvider) func(ctx context.Context, req any, _ *g } func streamInt(smp ServerMetricsProvider) func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + return func(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error { // We don't allocate the metric recorder here. It will be allocated the // first time the user calls CallMetricsRecorderFromContext(). rw := &recorderWrapper{smp: smp} diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go index 04edae6de66f1..6e7c4c9f301a9 100644 --- a/vendor/google.golang.org/grpc/orca/producer.go +++ b/vendor/google.golang.org/grpc/orca/producer.go @@ -72,7 +72,7 @@ type OOBListenerOptions struct { // returned stop function must be called when no longer needed. Do not // register a single OOBListener more than once per SubConn. 
func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOptions) (stop func()) { - pr, close := sc.GetOrBuildProducer(producerBuilderSingleton) + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) p := pr.(*producer) p.registerListener(l, opts.ReportInterval) @@ -84,7 +84,7 @@ func RegisterOOBListener(sc balancer.SubConn, l OOBListener, opts OOBListenerOpt // subsequent calls. return grpcsync.OnceFunc(func() { p.unregisterListener(l, opts.ReportInterval) - close() + closeFn() }) } diff --git a/vendor/google.golang.org/grpc/orca/server_metrics.go b/vendor/google.golang.org/grpc/orca/server_metrics.go index 67d1fa9d7f2b3..bb664d6a08143 100644 --- a/vendor/google.golang.org/grpc/orca/server_metrics.go +++ b/vendor/google.golang.org/grpc/orca/server_metrics.go @@ -108,7 +108,7 @@ type ServerMetricsRecorder interface { // SetMemoryUtilization sets the memory utilization server metric. Must be // in the range [0, 1]. SetMemoryUtilization(float64) - // DeleteMemoryUtilization deletes the memory utiliztion server metric to + // DeleteMemoryUtilization deletes the memory utilization server metric to // prevent it from being sent. DeleteMemoryUtilization() diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 73bd63364335e..e87a17f36a50b 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -20,6 +20,7 @@ package grpc import ( "google.golang.org/grpc/codes" + "google.golang.org/grpc/mem" "google.golang.org/grpc/status" ) @@ -31,9 +32,10 @@ import ( // later release. type PreparedMsg struct { // Struct for preparing msg before sending them - encodedData []byte + encodedData mem.BufferSlice hdr []byte - payload []byte + payload mem.BufferSlice + pf payloadFormat } // Encode marshalls and compresses the message using the codec and compressor for the stream. @@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error { if err != nil { return err } - p.encodedData = data - compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp) + + materializedData := data.Materialize() + data.Free() + p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)} + + // TODO: it should be possible to grab the bufferPool from the underlying + // stream implementation with a type cast to its actual type (such as + // addrConnStream) and accessing the buffer pool directly. + var compData mem.BufferSlice + compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool()) if err != nil { return err } - p.hdr, p.payload = msgHeader(data, compData) + + if p.pf.isCompressed() { + materializedCompData := compData.Materialize() + compData.Free() + compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)} + } + + p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf) + return nil } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh deleted file mode 100644 index 3edca296c224c..0000000000000 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 gRPC authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eu -o pipefail - -WORKDIR=$(mktemp -d) - -function finish { - rm -rf "$WORKDIR" -} -trap finish EXIT - -export GOBIN=${WORKDIR}/bin -export PATH=${GOBIN}:${PATH} -mkdir -p ${GOBIN} - -echo "remove existing generated files" -# grpc_testing_not_regenerate/*.pb.go is not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') - -echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" -(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) - -echo "go install cmd/protoc-gen-go-grpc" -(cd cmd/protoc-gen-go-grpc && go install .) - -echo "git clone https://github.com/grpc/grpc-proto" -git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto - -echo "git clone https://github.com/protocolbuffers/protobuf" -git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf - -# Pull in code.proto as a proto dependency -mkdir -p ${WORKDIR}/googleapis/google/rpc -echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" -curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto - -mkdir -p ${WORKDIR}/out - -# Generates sources without the embed requirement -LEGACY_SOURCES=( - ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto - ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto - ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto - ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto - profiling/proto/service.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto - ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto -) - -# Generates only the new gRPC Service symbols -SOURCES=( - $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$') - ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto - ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto - ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto - ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto - ${WORKDIR}/grpc-proto/grpc/testing/*.proto - ${WORKDIR}/grpc-proto/grpc/core/*.proto -) - -# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an -# import path of 'bar' in the generated code when 'foo.proto' is imported in -# one of the sources. -# -# Note that the protos listed here are all for testing purposes. All protos to -# be used externally should have a go_package option (and they don't need to be -# listed here). 
-OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ -Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ -Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing - -for src in ${SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -for src in ${LEGACY_SOURCES[@]}; do - echo "protoc ${src}" - protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ - -I"." \ - -I${WORKDIR}/grpc-proto \ - -I${WORKDIR}/googleapis \ - -I${WORKDIR}/protobuf/src \ - ${src} -done - -# The go_package option in grpc/lookup/v1/rls.proto doesn't match the -# current location. Move it into the right place. -mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 - -# grpc_testing_not_regenerate/*.pb.go are not re-generated, -# see grpc_testing_not_regenerate/README.md for details. -rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go - -cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index f2efa2a2cb5a9..09e864a89d35b 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -76,9 +76,11 @@ func (r *Resolver) InitialState(s resolver.State) { // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - r.BuildCallback(target, cc, opts) r.mu.Lock() defer r.mu.Unlock() + // Call BuildCallback after locking to avoid a race when UpdateState + // or ReportError is called before Build returns. + r.BuildCallback(target, cc, opts) r.CC = cc if r.lastSeenState != nil { err := r.CC.UpdateState(*r.lastSeenState) diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index c5fb45236faf6..23bb3fb258240 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { // any newly created ccResolverWrapper, except that close may be called instead. 
func (ccr *ccResolverWrapper) start() error { errCh := make(chan error) - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil { return } @@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error { } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.serializer.Schedule(func(ctx context.Context) { + ccr.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || ccr.resolver == nil { return } @@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() { ccr.closed = true ccr.mu.Unlock() - ccr.serializer.Schedule(func(context.Context) { + ccr.serializer.TrySchedule(func(context.Context) { if ccr.resolver == nil { return } @@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P // addChannelzTraceEvent adds a channelz trace event containing the new // state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + if !logger.V(0) && !channelz.IsOn() { + return + } var updates []string var oldSC, newSC *ServiceConfig var oldOK, newOK bool diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index fdd49e6e91510..2d96f1405e8da 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -19,7 +19,6 @@ package grpc import ( - "bytes" "compress/gzip" "context" "encoding/binary" @@ -35,6 +34,7 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -220,8 +220,8 @@ type HeaderCallOption struct { HeaderAddr *metadata.MD } -func (o HeaderCallOption) before(c *callInfo) error { return nil } -func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { +func (o HeaderCallOption) before(*callInfo) error { return nil } +func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { *o.HeaderAddr, _ = attempt.s.Header() } @@ -242,8 +242,8 @@ type TrailerCallOption struct { TrailerAddr *metadata.MD } -func (o TrailerCallOption) before(c *callInfo) error { return nil } -func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o TrailerCallOption) before(*callInfo) error { return nil } +func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { *o.TrailerAddr = attempt.s.Trailer() } @@ -264,24 +264,20 @@ type PeerCallOption struct { PeerAddr *peer.Peer } -func (o PeerCallOption) before(c *callInfo) error { return nil } -func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { +func (o PeerCallOption) before(*callInfo) error { return nil } +func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { if x, ok := peer.FromContext(attempt.s.Context()); ok { *o.PeerAddr = *x } } -// WaitForReady configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If waitForReady is false and the -// connection is in the TRANSIENT_FAILURE state, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will -// retry the call if it fails due to a transient error. gRPC will not retry if -// data was written to the wire unless the server indicates it did not process -// the data. 
Please refer to -// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// WaitForReady configures the RPC's behavior when the client is in +// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If +// waitForReady is false, the RPC will fail immediately. Otherwise, the client +// will wait until a connection becomes available or the RPC's deadline is +// reached. // -// By default, RPCs don't "wait for ready". +// By default, RPCs do not "wait for ready". func WaitForReady(waitForReady bool) CallOption { return FailFastCallOption{FailFast: !waitForReady} } @@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error { c.failFast = o.FailFast return nil } -func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o FailFastCallOption) after(*callInfo, *csAttempt) {} // OnFinish returns a CallOption that configures a callback to be called when // the call completes. The error passed to the callback is the status of the @@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error { return nil } -func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o OnFinishCallOption) after(*callInfo, *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size // in bytes the client can receive. If this is not set, gRPC uses the default @@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { c.maxReceiveMessageSize = &o.MaxRecvMsgSize return nil } -func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size // in bytes the client can send. If this is not set, gRPC uses the default @@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { c.maxSendMessageSize = &o.MaxSendMsgSize return nil } -func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {} // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials // for a call. @@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error { c.creds = o.Creds return nil } -func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {} // UseCompressor returns a CallOption which sets the compressor used when // sending the request. If WithCompressor is also set, UseCompressor has @@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error { c.compressorType = o.CompressorType return nil } -func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CompressorCallOption) after(*callInfo, *csAttempt) {} // CallContentSubtype returns a CallOption that will set the content-subtype // for a call. For example, if content-subtype is "json", the Content-Type over @@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error { c.contentSubtype = o.ContentSubtype return nil } -func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {} // ForceCodec returns a CallOption that will set codec to be used for all // request and response messages for a call. 
The result of calling Name() will @@ -515,10 +511,50 @@ type ForceCodecCallOption struct { } func (o ForceCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV1Bridge(o.Codec) return nil } -func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {} + +// ForceCodecV2 returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceCodecV2(codec encoding.CodecV2) CallOption { + return ForceCodecV2CallOption{CodecV2: codec} +} + +// ForceCodecV2CallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecV2CallOption struct { + CodecV2 encoding.CodecV2 +} + +func (o ForceCodecV2CallOption) before(c *callInfo) error { + c.codec = o.CodecV2 + return nil +} + +func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {} // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of // an encoding.Codec. @@ -540,10 +576,10 @@ type CustomCodecCallOption struct { } func (o CustomCodecCallOption) before(c *callInfo) error { - c.codec = o.Codec + c.codec = newCodecV0Bridge(o.Codec) return nil } -func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. @@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize return nil } -func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} +func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {} // The format of the payload: compressed or not? type payloadFormat uint8 @@ -581,19 +617,28 @@ const ( compressionMade payloadFormat = 1 // compressed ) +func (pf payloadFormat) isCompressed() bool { + return pf == compressionMade +} + +type streamReader interface { + ReadHeader(header []byte) error + Read(n int) (mem.BufferSlice, error) +} + // parser reads complete gRPC messages from the underlying reader. type parser struct { // r is the underlying reader. // See the comment on recvMsg for the permissible // error types. - r io.Reader + r streamReader // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte - // recvBufferPool is the pool of shared receive buffers. - recvBufferPool SharedBufferPool + // bufferPool is the pool of shared receive buffers. + bufferPool mem.BufferPool } // recvMsg reads a complete gRPC message from the stream. 
@@ -608,14 +653,15 @@ type parser struct { // - an error from the status package // // No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible +// that the underlying streamReader must not return an incompatible // error. -func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := p.r.Read(p.header[:]); err != nil { +func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) { + err := p.r.ReadHeader(p.header[:]) + if err != nil { return 0, nil, err } - pf = payloadFormat(p.header[0]) + pf := payloadFormat(p.header[0]) length := binary.BigEndian.Uint32(p.header[1:]) if length == 0 { @@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - msg = p.recvBufferPool.Get(int(length)) - if _, err := p.r.Read(msg); err != nil { + + data, err := p.r.Read(int(length)) + if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } return 0, nil, err } - return pf, msg, nil + return pf, data, nil } // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg any) ([]byte, error) { +func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(len(b)) > math.MaxUint32 { + if uint(b.Len()) > math.MaxUint32 { + b.Free() return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) } return b, nil @@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) { // indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. 
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { - if compressor == nil && cp == nil { - return nil, nil - } - if len(in) == 0 { - return nil, nil +func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) { + if (compressor == nil && cp == nil) || in.Len() == 0 { + return nil, compressionNone, nil } + var out mem.BufferSlice + w := mem.NewWriter(&out, pool) wrapErr := func(err error) error { + out.Free() return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } - cbuf := &bytes.Buffer{} if compressor != nil { - z, err := compressor.Compress(cbuf) + z, err := compressor.Compress(w) if err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } - if _, err := z.Write(in); err != nil { - return nil, wrapErr(err) + for _, b := range in { + if _, err := z.Write(b.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) + } } if err := z.Close(); err != nil { - return nil, wrapErr(err) + return nil, 0, wrapErr(err) } } else { - if err := cp.Do(cbuf, in); err != nil { - return nil, wrapErr(err) + // This is obviously really inefficient since it fully materializes the data, but + // there is no way around this with the old Compressor API. At least it attempts + // to return the buffer to the provider, in the hopes it can be reused (maybe + // even by a subsequent call to this very function). + buf := in.MaterializeToBuffer(pool) + defer buf.Free() + if err := cp.Do(w, buf.ReadOnlyData()); err != nil { + return nil, 0, wrapErr(err) } } - return cbuf.Bytes(), nil + return out, compressionMade, nil } const ( @@ -697,33 +752,36 @@ const ( // msgHeader returns a 5-byte header for the message being transmitted and the // payload, which is compData if non-nil or data otherwise. 
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { +func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) { hdr = make([]byte, headerLen) - if compData != nil { - hdr[0] = byte(compressionMade) - data = compData + hdr[0] = byte(pf) + + var length uint32 + if pf.isCompressed() { + length = uint32(compData.Len()) + payload = compData } else { - hdr[0] = byte(compressionNone) + length = uint32(data.Len()) + payload = data } // Write length of payload into buf - binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) - return hdr, data + binary.BigEndian.PutUint32(hdr[payloadLen:], length) + return hdr, payload } -func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - CompressedLength: len(payload), + Length: dataLength, + WireLength: payloadLength + headerLen, + CompressedLength: payloadLength, SentTime: t, } } -func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status { switch pf { case compressionNone: case compressionMade: @@ -731,7 +789,11 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") } if !haveCompressor { - return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + if isServer { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } else { + return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } } default: return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) @@ -741,104 +803,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool type payloadInfo struct { compressedLength int // The compressed length got from wire. - uncompressedBytes []byte + uncompressedBytes mem.BufferSlice +} + +func (p *payloadInfo) free() { + if p != nil && p.uncompressedBytes != nil { + p.uncompressedBytes.Free() + } } // recvAndDecompress reads a message from the stream, decompressing it if necessary. // // Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as // the buffer is no longer needed. -func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, -) (uncompressedBuf []byte, cancel func(), err error) { - pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize) +// TODO: Refactor this function to reduce the number of arguments. 
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool, +) (out mem.BufferSlice, err error) { + pf, compressed, err := p.recvMsg(maxReceiveMessageSize) if err != nil { - return nil, nil, err + return nil, err } - if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { - return nil, nil, st.Err() + compressedLength := compressed.Len() + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil { + compressed.Free() + return nil, st.Err() } var size int - if pf == compressionMade { + if pf.isCompressed() { + defer compressed.Free() + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf)) + var uncompressedBuf []byte + uncompressedBuf, err = dc.Do(compressed.Reader()) + if err == nil { + out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)} + } size = len(uncompressedBuf) } else { - uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize) + out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) } if err != nil { - return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { + out.Free() // TODO: Revisit the error code. Currently keep it consistent with java // implementation. - return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } else { - uncompressedBuf = compressedBuf + out = compressed } if payInfo != nil { - payInfo.compressedLength = len(compressedBuf) - payInfo.uncompressedBytes = uncompressedBuf - - cancel = func() {} - } else { - cancel = func() { - p.recvBufferPool.Put(&compressedBuf) - } + payInfo.compressedLength = compressedLength + out.Ref() + payInfo.uncompressedBytes = out } - return uncompressedBuf, cancel, nil + return out, nil } // Using compressor, decompress d, returning data and size. // Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { - dcReader, err := compressor.Decompress(bytes.NewReader(d)) +func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { + dcReader, err := compressor.Decompress(d.Reader()) if err != nil { return nil, 0, err } - if sizer, ok := compressor.(interface { - DecompressedSize(compressedBytes []byte) int - }); ok { - if size := sizer.DecompressedSize(d); size >= 0 { - if size > maxReceiveMessageSize { - return nil, size, nil - } - // size is used as an estimate to size the buffer, but we - // will read more data if available. - // +MinRead so ReadFrom will not reallocate if size is correct. 
- // - // TODO: If we ensure that the buffer size is the same as the DecompressedSize, - // we can also utilize the recv buffer pool here. - buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) - bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return buf.Bytes(), int(bytesRead), err - } + + // TODO: Can/should this still be preserved with the new BufferSlice API? Are + // there any actual benefits to allocating a single large buffer instead of + // multiple smaller ones? + //if sizer, ok := compressor.(interface { + // DecompressedSize(compressedBytes []byte) int + //}); ok { + // if size := sizer.DecompressedSize(d); size >= 0 { + // if size > maxReceiveMessageSize { + // return nil, size, nil + // } + // // size is used as an estimate to size the buffer, but we + // // will read more data if available. + // // +MinRead so ReadFrom will not reallocate if size is correct. + // // + // // TODO: If we ensure that the buffer size is the same as the DecompressedSize, + // // we can also utilize the recv buffer pool here. + // buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + // bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + // return buf.Bytes(), int(bytesRead), err + // } + //} + + var out mem.BufferSlice + _, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + if err != nil { + out.Free() + return nil, 0, err } - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) - return d, len(d), err + return out, out.Len(), nil } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error { + data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer) if err != nil { return err } - defer cancel() - if err := c.Unmarshal(buf, m); err != nil { + // If the codec wants its own reference to the data, it can get it. Otherwise, always + // free the buffers. + defer data.Free() + + if err := c.Unmarshal(data, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } + return nil } @@ -941,7 +1028,7 @@ func setCallInfoCodec(c *callInfo) error { // encoding.Codec (Name vs. String method name). We only support // setting content subtype from encoding.Codec to avoid a behavior // change with the deprecated version. - if ec, ok := c.codec.(encoding.Codec); ok { + if ec, ok := c.codec.(encoding.CodecV2); ok { c.contentSubtype = strings.ToLower(ec.Name()) } } @@ -950,12 +1037,12 @@ func setCallInfoCodec(c *callInfo) error { if c.contentSubtype == "" { // No codec specified in CallOptions; use proto by default. 
- c.codec = encoding.GetCodec(proto.Name) + c.codec = getCodec(proto.Name) return nil } // c.contentSubtype is already lowercased in CallContentSubtype - c.codec = encoding.GetCodec(c.contentSubtype) + c.codec = getCodec(c.contentSubtype) if c.codec == nil { return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 89f8e4792bf15..d1e1415a40f9b 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -45,6 +45,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -80,7 +81,7 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption - internal.RecvBufferPool = recvBufferPool + internal.BufferPool = bufferPool } var statusOK = status.New(codes.OK, "") @@ -170,7 +171,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 - recvBufferPool SharedBufferPool + bufferPool mem.BufferPool waitForHandlers bool } @@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, - recvBufferPool: nopBufferPool{}, + bufferPool: mem.DefaultBufferPool(), } var globalServerOptions []ServerOption @@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { // Will be supported throughout 1.x. func CustomCodec(codec Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV0Bridge(codec) }) } @@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption { // later release. func ForceServerCodec(codec encoding.Codec) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.codec = codec + o.codec = newCodecV1Bridge(codec) + }) +} + +// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new +// CodecV2 interface. +// +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codecV2 }) } @@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption { }) } -// RecvBufferPool returns a ServerOption that configures the server -// to use the provided shared buffer pool for parsing incoming messages. Depending -// on the application's workload, this could result in reduced memory allocation. -// -// If you are unsure about how to implement a memory pool but want to utilize one, -// begin with grpc.NewSharedBufferPool. -// -// Note: The shared buffer pool feature will not be active if any of the following -// options are used: StatsHandler, EnableTracing, or binary logging. In such -// cases, the shared buffer pool will be ignored. -// -// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in -// v1.60.0 or later. 
-func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { - return recvBufferPool(bufferPool) -} - -func recvBufferPool(bufferPool SharedBufferPool) ServerOption { +func bufferPool(bufferPool mem.BufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { - o.recvBufferPool = bufferPool + o.bufferPool = bufferPool }) } @@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption { // workload (assuming a QPS of a few thousand requests/sec). const serverWorkerResetThreshold = 1 << 16 -// serverWorkers blocks on a *transport.Stream channel forever and waits for +// serverWorker blocks on a *transport.Stream channel forever and waits for // data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). @@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, + BufferPool: s.opts.bufferPool, } st, err := transport.NewServerTransport(c, config) if err != nil { @@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil) // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool) if err != nil { // Errors returned from transport.NewServerHandlerTransport have // already been written to w. @@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } - compData, err := compress(data, cp, comp) + + compData, pf, err := compress(data, cp, comp, s.opts.bufferPool) if err != nil { + data.Free() channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } - hdr, payload := msgHeader(data, compData) + + hdr, payload := msgHeader(data, compData, pf) + + defer func() { + compData.Free() + data.Free() + // payload does not need to be freed here, it is either data or compData, both of + // which are already freed. + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > s.opts.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize) + if payloadLen > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", payloadLen, s.opts.maxSendMessageSize) } err = t.Write(stream, hdr, payload, opts) if err == nil { - for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) + if len(s.opts.statsHandlers) != 0 { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now())) + } } } return err @@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor var payInfo *payloadInfo if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } + defer d.Free() if channelz.IsOn() { t.IncrMsgRecv() } df := func(v any) error { - defer cancel() - if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } + for _, sh := range shs { sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, - Length: len(d), + Length: d.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Data: d, }) } if len(binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: d, + Message: d.Materialize(), } for _, binlog := range binlogs { binlog.Log(ctx, cm) @@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran ctx: ctx, t: t, s: stream, - p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + p: &parser{r: stream, bufferPool: s.opts.bufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { return s.opts.codec } if contentSubtype == "" { - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } - codec := encoding.GetCodec(contentSubtype) + codec := getCodec(contentSubtype) if codec == nil { logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) - return encoding.GetCodec(proto.Name) + return getCodec(proto.Name) } return codec } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go deleted file mode 100644 index 48a64cfe8e256..0000000000000 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import "sync" - -// SharedBufferPool is a pool of buffers that can be shared, resulting in -// decreased memory allocation. Currently, in gRPC-go, it is only utilized -// for parsing incoming messages. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -type SharedBufferPool interface { - // Get returns a buffer with specified length from the pool. - // - // The returned byte slice may be not zero initialized. - Get(length int) []byte - - // Put returns a buffer to the pool. - Put(*[]byte) -} - -// NewSharedBufferPool creates a simple SharedBufferPool with buckets -// of different sizes to optimize memory usage. This prevents the pool from -// wasting large amounts of memory, even when handling messages of varying sizes. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. -func NewSharedBufferPool() SharedBufferPool { - return &simpleSharedBufferPool{ - pools: [poolArraySize]simpleSharedBufferChildPool{ - newBytesPool(level0PoolMaxSize), - newBytesPool(level1PoolMaxSize), - newBytesPool(level2PoolMaxSize), - newBytesPool(level3PoolMaxSize), - newBytesPool(level4PoolMaxSize), - newBytesPool(0), - }, - } -} - -// simpleSharedBufferPool is a simple implementation of SharedBufferPool. -type simpleSharedBufferPool struct { - pools [poolArraySize]simpleSharedBufferChildPool -} - -func (p *simpleSharedBufferPool) Get(size int) []byte { - return p.pools[p.poolIdx(size)].Get(size) -} - -func (p *simpleSharedBufferPool) Put(bs *[]byte) { - p.pools[p.poolIdx(cap(*bs))].Put(bs) -} - -func (p *simpleSharedBufferPool) poolIdx(size int) int { - switch { - case size <= level0PoolMaxSize: - return level0PoolIdx - case size <= level1PoolMaxSize: - return level1PoolIdx - case size <= level2PoolMaxSize: - return level2PoolIdx - case size <= level3PoolMaxSize: - return level3PoolIdx - case size <= level4PoolMaxSize: - return level4PoolIdx - default: - return levelMaxPoolIdx - } -} - -const ( - level0PoolMaxSize = 16 // 16 B - level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B - level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB - level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB - level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB -) - -const ( - level0PoolIdx = iota - level1PoolIdx - level2PoolIdx - level3PoolIdx - level4PoolIdx - levelMaxPoolIdx - poolArraySize -) - -type simpleSharedBufferChildPool interface { - Get(size int) []byte - Put(any) -} - -type bufferPool struct { - sync.Pool - - defaultSize int -} - -func (p *bufferPool) Get(size int) []byte { - bs := p.Pool.Get().(*[]byte) - - if cap(*bs) < size { - p.Pool.Put(bs) - - return make([]byte, size) - } - - return (*bs)[:size] -} - -func newBytesPool(size int) simpleSharedBufferChildPool { - return &bufferPool{ - Pool: sync.Pool{ - New: func() any { - bs := make([]byte, size) - return &bs - }, - }, - defaultSize: size, - } -} - -// nopBufferPool is a buffer pool just makes new buffer without pooling. 
-type nopBufferPool struct { -} - -func (nopBufferPool) Get(length int) []byte { - return make([]byte, length) -} - -func (nopBufferPool) Put(*[]byte) { -} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index fdb0bd65182c5..71195c4943d7a 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -77,9 +77,6 @@ type InPayload struct { // the call to HandleRPC which provides the InPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). @@ -150,9 +147,6 @@ type OutPayload struct { // the call to HandleRPC which provides the OutPayload returns and must be // copied if needed later. Payload any - // Data is the serialized message payload. - // Deprecated: Data will be removed in the next release. - Data []byte // Length is the size of the uncompressed payload data. Does not include any // framing (gRPC or HTTP/2). Length int diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 8051ef5b514a3..bb2b2a216ce24 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -41,6 +41,7 @@ import ( "google.golang.org/grpc/internal/serviceconfig" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/mem" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" @@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client cs.attempt = a return nil } - if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil { return nil, err } @@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error { } a.s = s a.ctx = s.Context() - a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -566,10 +567,15 @@ type clientStream struct { // place where we need to check if the attempt is nil. attempt *csAttempt // TODO(hedging): hedging will have multiple attempts simultaneously. - committed bool // active attempt committed for retry? - onCommit func() - buffer []func(a *csAttempt) error // operations to replay on retry - bufferSize int // current size of buffer + committed bool // active attempt committed for retry? + onCommit func() + replayBuffer []replayOp // operations to replay on retry + replayBufferSize int // current size of replayBuffer +} + +type replayOp struct { + op func(a *csAttempt) error + cleanup func() } // csAttempt implements a single transport stream attempt within a @@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() { cs.onCommit() } cs.committed = true - cs.buffer = nil + for _, op := range cs.replayBuffer { + if op.cleanup != nil { + op.cleanup() + } + } + cs.replayBuffer = nil } func (cs *clientStream) commitAttempt() { @@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { // the stream is canceled. 
return err } - // Note that the first op in the replay buffer always sets cs.attempt + // Note that the first op in replayBuffer always sets cs.attempt // if it is able to pick a transport and create a stream. if lastErr = cs.replayBufferLocked(attempt); lastErr == nil { return nil @@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) // already be status errors. return toRPCErr(op(cs.attempt)) } - if len(cs.buffer) == 0 { + if len(cs.replayBuffer) == 0 { // For the first op, which controls creation of the stream and // assigns cs.attempt, we need to create a new attempt inline // before executing the first op. On subsequent ops, the attempt @@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD { } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { - for _, f := range cs.buffer { - if err := f(attempt); err != nil { + for _, f := range cs.replayBuffer { + if err := f.op(attempt); err != nil { return err } } return nil } -func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) { // Note: we still will buffer if retry is disabled (for transparent retries). if cs.committed { return } - cs.bufferSize += sz - if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.replayBufferSize += sz + if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize { cs.commitAttemptLocked() + cleanup() return } - cs.buffer = append(cs.buffer, op) + cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup}) } func (cs *clientStream) SendMsg(m any) (err error) { @@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + if payloadLen > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize) } + + // always take an extra ref in case data == payload (i.e. when the data isn't + // compressed). The original ref will always be freed by the deferred free above. + payload.Ref() op := func(a *csAttempt) error { - return a.sendMsg(m, hdr, payload, data) + return a.sendMsg(m, hdr, payload, dataLen, payloadLen) + } + + // onSuccess is invoked when the op is captured for a subsequent retry. If the + // stream was established by a previous message and therefore retries are + // disabled, onSuccess will not be invoked, and payloadRef can be freed + // immediately. 
+ onSuccessCalled := false + err = cs.withRetry(op, func() { + cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free) + onSuccessCalled = true + }) + if !onSuccessCalled { + payload.Free() } - err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) if len(cs.binlogs) != 0 && err == nil { cm := &binarylog.ClientMessage{ OnClientSide: true, - Message: data, + Message: data.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, cm) @@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error { var recvInfo *payloadInfo if len(cs.binlogs) != 0 { recvInfo = &payloadInfo{} + defer recvInfo.free() } err := cs.withRetry(func(a *csAttempt) error { return a.recvMsg(m, recvInfo) @@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && err == nil { sm := &binarylog.ServerMessage{ OnClientSide: true, - Message: recvInfo.uncompressedBytes, + Message: recvInfo.uncompressedBytes.Materialize(), } for _, binlog := range cs.binlogs { binlog.Log(cs.ctx, sm) @@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error { // RecvMsg. This also matches historical behavior. return nil } - cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }) if len(cs.binlogs) != 0 { chc := &binarylog.ClientHalfClose{ OnClientSide: true, @@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { } return io.EOF } - for _, sh := range a.statsHandlers { - sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + if len(a.statsHandlers) != 0 { + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now())) + } } if channelz.IsOn() { a.t.IncrMsgSent() @@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} + defer payInfo.free() } if !a.decompSet { @@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) - if err != nil { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { return statusErr @@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } for _, sh := range a.statsHandlers { sh.HandleRPC(a.ctx, &stats.InPayload{ - Client: true, - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, + Client: true, + RecvTime: time.Now(), + Payload: m, WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, - Length: len(payInfo.uncompressedBytes), + Length: payInfo.uncompressedBytes.Len(), }) } if channelz.IsOn() { @@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. 
// This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { return a.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (a *csAttempt) finish(err error) { @@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// newClientStream creates a ClientStream with the specified transport, on the +// newNonRetryClientStream creates a ClientStream with the specified transport, on the // given addrConn. // // It's expected that the given transport is either the same one in addrConn, or // is already closed. To avoid race, transport is specified separately, instead -// of using ac.transpot. +// of using ac.transport. // // Main difference between this and ClientConn.NewStream: // - no retry @@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + // TODO(dfawley): should we be checking len(data) instead? - if len(payld) > *as.callInfo.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + if payload.Len() > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Only initialize this state once per stream. 
as.decompSet = true } - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err != nil { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { if err == io.EOF { if statusErr := as.s.Status().Err(); statusErr != nil { return statusErr @@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { + if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { return as.s.Status().Err() // non-server streaming Recv returns nil on success + } else if err != nil { + return toRPCErr(err) } - return toRPCErr(err) + return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } func (as *addrConnStream) finish(err error) { @@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) if err != nil { return err } + defer func() { + data.Free() + // only free payload if compression was made, and therefore it is a different set + // of buffers from data. + if pf.isCompressed() { + payload.Free() + } + }() + + dataLen := data.Len() + payloadLen := payload.Len() + // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > ss.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize) + if payloadLen > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payloadLen, ss.maxSendMessageSize) } if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { return toRPCErr(err) } + if len(ss.binlogs) != 0 { if !ss.serverHeaderBinlogged { h, _ := ss.s.Header() @@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } } sm := &binarylog.ServerMessage{ - Message: data, + Message: data.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, sm) @@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) { } if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { - sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now())) } } return nil @@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { var payInfo *payloadInfo if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { payInfo = &payloadInfo{} + defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) { if len(ss.statsHandler) != 0 { for _, sh := range ss.statsHandler { sh.HandleRPC(ss.s.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: m, - // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - Length: len(payInfo.uncompressedBytes), + RecvTime: time.Now(), + Payload: m, + Length: payInfo.uncompressedBytes.Len(), WireLength: payInfo.compressedLength + headerLen, CompressedLength: payInfo.compressedLength, }) @@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { } if len(ss.binlogs) != 0 { cm := &binarylog.ClientMessage{ - Message: payInfo.uncompressedBytes, + Message: payInfo.uncompressedBytes.Materialize(), } for _, binlog := range ss.binlogs { binlog.Log(ss.ctx, cm) @@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { return Method(stream.Context()) } -// prepareMsg returns the hdr, payload and data -// using the compressors passed or using the -// passed preparedmsg -func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +// prepareMsg returns the hdr, payload and data using the compressors passed or +// using the passed preparedmsg. The returned boolean indicates whether +// compression was made and therefore whether the payload needs to be freed in +// addition to the returned data. Freeing the payload if the returned boolean is +// false can lead to undefined behavior. +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { - return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil } // The input interface is not a prepared msg. 
// Marshal and Compress the data at this point data, err = encode(codec, m) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, 0, err } - compData, err := compress(data, cp, comp) + compData, pf, err := compress(data, cp, comp, pool) if err != nil { - return nil, nil, nil, err + data.Free() + return nil, nil, nil, 0, err } - hdr, payload = msgHeader(data, compData) - return hdr, payload, data, nil + hdr, payload = msgHeader(data, compData, pf) + return hdr, data, payload, pf, nil } diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go index 8b813529c0cc5..0037fee0bd71a 100644 --- a/vendor/google.golang.org/grpc/stream_interfaces.go +++ b/vendor/google.golang.org/grpc/stream_interfaces.go @@ -22,15 +22,35 @@ package grpc // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. type ServerStreamingClient[Res any] interface { + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. ClientStream } // ServerStreamingServer represents the server side of a server-streaming (one // request, many responses) RPC. It is generic over the type of the response // message. It is used in generated code. +// +// To terminate the response stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type ServerStreamingServer[Res any] interface { + // Send sends a response message to the client. The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface { // message stream and the type of the unary response message. It is used in // generated code. type ClientStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using CloseAndRecv(). Send(*Req) error + + // CloseAndRecv closes the request stream and waits for the server's + // response. This method must be called once and only once after sending + // all request messages. Any error returned is implemented by the status + // package. CloseAndRecv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, and Trailer + // functionality. No other methods in the ClientStream should be called + // directly. 
ClientStream } @@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface { // requests, one response) RPC. It is generic over both the type of the request // message stream and the type of the unary response message. It is used in // generated code. +// +// To terminate the RPC, call SendAndClose and return nil from the method +// handler or do not call SendAndClose and return an error from the status +// package. type ClientStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseAndRecv on its + // ClientStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // SendAndClose sends a single response message to the client and closes + // the stream. This method must be called once and only once after all + // request messages have been processed. Recv should not be called after + // calling SendAndClose. SendAndClose(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } @@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface { // request message stream and the type of the response message stream. It is // used in generated code. type BidiStreamingClient[Req any, Res any] interface { + // Send sends a request message to the server. The client may call Send + // multiple times to send multiple messages to the server. On error, Send + // aborts the stream. If the error was generated by the client, the status + // is returned directly. Otherwise, io.EOF is returned, and the status of + // the stream may be discovered using Recv(). Send(*Req) error + + // Recv receives the next response message from the server. The client may + // repeatedly call Recv to read messages from the response stream. If + // io.EOF is returned, the stream has terminated with an OK status. Any + // other error is compatible with the status package and indicates the + // RPC's status code and message. Recv() (*Res, error) + + // ClientStream is embedded to provide Context, Header, Trailer, and + // CloseSend functionality. No other methods in the ClientStream should be + // called directly. ClientStream } @@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface { // (many requests, many responses) RPC. It is generic over both the type of the // request message stream and the type of the response message stream. It is // used in generated code. +// +// To terminate the stream, return from the handler method and return +// an error from the status package, or use nil to indicate an OK status code. type BidiStreamingServer[Req any, Res any] interface { + // Recv receives the next request message from the client. The server may + // repeatedly call Recv to read messages from the request stream. If + // io.EOF is returned, it indicates the client called CloseSend on its + // BidiStreamingClient. Any other error indicates the stream was + // terminated unexpectedly, and the handler method should return, as the + // stream is no longer usable. Recv() (*Req, error) + + // Send sends a response message to the client. 
The server handler may + // call Send multiple times to send multiple messages to the client. An + // error is returned if the stream was terminated unexpectedly, and the + // handler method should return, as the stream is no longer usable. Send(*Res) error + + // ServerStream is embedded to provide Context, SetHeader, SendHeader, and + // SetTrailer functionality. No other methods in the ServerStream should + // be called directly. ServerStream } diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go index 3f77f4876eb86..e6eb4feebb99f 100644 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go @@ -109,7 +109,7 @@ type pipe struct { mu sync.Mutex // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respsectively. + // with r and w pointing to the offset to read and write, respectively. // // Data is read between [r, w) and written to [w, r), wrapping around the end // of the slice if necessary. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index bafaef99be989..187fbf1195141 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.65.0" +const Version = "1.67.0" diff --git a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go index cb022b45de186..578e1278970d4 100644 --- a/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go +++ b/vendor/google.golang.org/grpc/xds/bootstrap/credentials.go @@ -58,7 +58,7 @@ func (t *tlsCredsBuilder) Name() string { } // googleDefaultCredsBuilder implements the `Credentials` interface defined in -// package `xds/boostrap` and encapsulates a Google Default credential. +// package `xds/bootstrap` and encapsulates a Google Default credential. type googleDefaultCredsBuilder struct{} func (d *googleDefaultCredsBuilder) Build(json.RawMessage) (credentials.Bundle, func(), error) { diff --git a/vendor/google.golang.org/grpc/xds/csds/csds.go b/vendor/google.golang.org/grpc/xds/csds/csds.go index 6266f60e86d94..3d8398a72ff09 100644 --- a/vendor/google.golang.org/grpc/xds/csds/csds.go +++ b/vendor/google.golang.org/grpc/xds/csds/csds.go @@ -27,7 +27,6 @@ import ( "context" "fmt" "io" - "sync" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" @@ -55,22 +54,14 @@ func prefixLogger(s *ClientStatusDiscoveryServer) *internalgrpclog.PrefixLogger // https://github.com/grpc/proposal/blob/master/A40-csds-support.md. type ClientStatusDiscoveryServer struct { logger *internalgrpclog.PrefixLogger - - mu sync.Mutex - xdsClient xdsclient.XDSClient - xdsClientClose func() } // NewClientStatusDiscoveryServer returns an implementation of the CSDS server // that can be registered on a gRPC server. 
func NewClientStatusDiscoveryServer() (*ClientStatusDiscoveryServer, error) { - c, close, err := xdsclient.New() - if err != nil { - logger.Warningf("Failed to create xDS client: %v", err) - } - s := &ClientStatusDiscoveryServer{xdsClient: c, xdsClientClose: close} + s := &ClientStatusDiscoveryServer{} s.logger = prefixLogger(s) - s.logger.Infof("Created CSDS server, with xdsClient %p", c) + s.logger.Infof("Created CSDS server") return s, nil } @@ -104,24 +95,14 @@ func (s *ClientStatusDiscoveryServer) FetchClientStatus(_ context.Context, req * // // If it returns an error, the error is a status error. func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statuspb.ClientStatusRequest) (*v3statuspb.ClientStatusResponse, error) { - s.mu.Lock() - defer s.mu.Unlock() - - if s.xdsClient == nil { - return &v3statuspb.ClientStatusResponse{}, nil - } // Field NodeMatchers is unsupported, by design // https://github.com/grpc/proposal/blob/master/A40-csds-support.md#detail-node-matching. if len(req.NodeMatchers) != 0 { return nil, status.Errorf(codes.InvalidArgument, "node_matchers are not supported, request contains node_matchers: %v", req.NodeMatchers) } - return s.xdsClient.DumpResources() + return xdsclient.DumpResources(), nil } // Close cleans up the resources. -func (s *ClientStatusDiscoveryServer) Close() { - if s.xdsClientClose != nil { - s.xdsClientClose() - } -} +func (s *ClientStatusDiscoveryServer) Close() {} diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index 6ab7fb03f2dcf..936bf2da32742 100644 --- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -26,6 +26,7 @@ package googledirectpath import ( + "encoding/json" "fmt" "math/rand" "net/url" @@ -37,7 +38,6 @@ import ( internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/xds/internal/xdsclient" _ "google.golang.org/grpc/xds" // To register xds resolvers and balancers. ) @@ -46,30 +46,21 @@ const ( c2pScheme = "google-c2p" c2pAuthority = "traffic-director-c2p.xds.googleapis.com" - tdURL = "dns:///directpath-pa.googleapis.com" - httpReqTimeout = 10 * time.Second - zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" - ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" - - gRPCUserAgentName = "gRPC Go" - clientFeatureNoOverprovisioning = "envoy.lb.does_not_support_overprovisioning" - clientFeatureResourceWrapper = "xds.config.resource-in-sotw" - ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" - - logPrefix = "[google-c2p-resolver]" + tdURL = "dns:///directpath-pa.googleapis.com" + zoneURL = "http://metadata.google.internal/computeMetadata/v1/instance/zone" + ipv6URL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s" + ipv6CapableMetadataName = "TRAFFICDIRECTOR_DIRECTPATH_C2P_IPV6_CAPABLE" + httpReqTimeout = 10 * time.Second + logPrefix = "[google-c2p-resolver]" dnsName, xdsName = "dns", "xds" ) // For overriding in unittests. 
var ( - onGCE = googlecloud.OnGCE - - newClientWithConfig = func(config *bootstrap.Config) (xdsclient.XDSClient, func(), error) { - return xdsclient.NewWithConfig(config) - } - - logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) + onGCE = googlecloud.OnGCE + randInt = rand.Int + logger = internalgrpclog.NewPrefixLogger(grpclog.Component("directpath"), logPrefix) ) func init() { @@ -108,23 +99,18 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts xdsServerCfg := newXdsServerConfig(xdsServerURI) authoritiesCfg := newAuthoritiesConfig(xdsServerCfg) - config, err := bootstrap.NewConfigFromContents([]byte(fmt.Sprintf(` - { - "xds_servers": [%s], - "client_default_listener_resource_name_template": "%%s", - "authorities": %s, - "node": %s - }`, xdsServerCfg, authoritiesCfg, nodeCfg))) - - if err != nil { - return nil, fmt.Errorf("failed to build bootstrap configuration: %v", err) + cfg := map[string]any{ + "xds_servers": []any{xdsServerCfg}, + "client_default_listener_resource_name_template": "%s", + "authorities": authoritiesCfg, + "node": nodeCfg, } - - // Create singleton xds client with this config. The xds client will be - // used by the xds resolver later. - _, close, err := newClientWithConfig(config) + cfgJSON, err := json.Marshal(cfg) if err != nil { - return nil, fmt.Errorf("failed to start xDS client: %v", err) + return nil, fmt.Errorf("failed to marshal bootstrap configuration: %v", err) + } + if err := bootstrap.SetFallbackBootstrapConfig(cfgJSON); err != nil { + return nil, fmt.Errorf("failed to set fallback bootstrap configuration: %v", err) } t = resolver.Target{ @@ -134,66 +120,36 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts Path: t.URL.Path, }, } - xdsR, err := resolver.Get(xdsName).Build(t, cc, opts) - if err != nil { - close() - return nil, err - } - return &c2pResolver{ - Resolver: xdsR, - clientCloseFunc: close, - }, nil + return resolver.Get(xdsName).Build(t, cc, opts) } func (b c2pResolverBuilder) Scheme() string { return c2pScheme } -type c2pResolver struct { - resolver.Resolver - clientCloseFunc func() -} - -func (r *c2pResolver) Close() { - r.Resolver.Close() - r.clientCloseFunc() -} - -var id = fmt.Sprintf("C2P-%d", rand.Int()) - -func newNodeConfig(zone string, ipv6Capable bool) string { - metadata := "" +func newNodeConfig(zone string, ipv6Capable bool) map[string]any { + node := map[string]any{ + "id": fmt.Sprintf("C2P-%d", randInt()), + "locality": map[string]any{"zone": zone}, + } if ipv6Capable { - metadata = fmt.Sprintf(`, "metadata": { "%s": true }`, ipv6CapableMetadataName) + node["metadata"] = map[string]any{ipv6CapableMetadataName: true} } - - return fmt.Sprintf(` - { - "id": "%s", - "locality": { - "zone": "%s" - } - %s - }`, id, zone, metadata) + return node } -func newAuthoritiesConfig(xdsServer string) string { - return fmt.Sprintf(` - { - "%s": { - "xds_servers": [%s] - } +func newAuthoritiesConfig(serverCfg map[string]any) map[string]any { + return map[string]any{ + c2pAuthority: map[string]any{"xds_servers": []any{serverCfg}}, } - `, c2pAuthority, xdsServer) } -func newXdsServerConfig(xdsServerURI string) string { - return fmt.Sprintf(` - { - "server_uri": "%s", - "channel_creds": [{"type": "google_default"}], - "server_features": ["xds_v3", "ignore_resource_deletion", "xds.config.resource-in-sotw"] - }`, xdsServerURI) +func newXdsServerConfig(uri string) map[string]any { + return map[string]any{ + "server_uri": uri, + "channel_creds": 
[]map[string]any{{"type": "google_default"}}, + "server_features": []any{"ignore_resource_deletion"}, + } } // runDirectPath returns whether this resolver should use direct path. diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 8e97e104ed4b1..9a112e276977d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -207,7 +207,7 @@ func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) e } // A root provider is required whether we are using TLS or mTLS. - cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs + cpc := b.xdsClient.BootstrapConfig().CertProviderConfigs() rootProvider, err := buildProvider(cpc, config.RootInstanceName, config.RootCertName, false, true) if err != nil { return err @@ -309,8 +309,8 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro b.lbCfg = lbCfg // Handle the update in a blocking fashion. - done := make(chan struct{}) - ok = b.serializer.Schedule(func(context.Context) { + errCh := make(chan error, 1) + callback := func(context.Context) { // A config update with a changed top-level cluster name means that none // of our old watchers make any sense any more. b.closeAllWatchers() @@ -319,20 +319,20 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro // could end up creating more watchers if turns out to be an aggregate // cluster. b.createAndAddWatcherForCluster(lbCfg.ClusterName) - close(done) - }) - if !ok { + errCh <- nil + } + onFailure := func() { // The call to Schedule returns false *only* if the serializer has been // closed, which happens only when we receive an update after close. - return errBalancerClosed + errCh <- errBalancerClosed } - <-done - return nil + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh } // ResolverError handles errors reported by the xdsResolver. func (b *cdsBalancer) ResolverError(err error) { - b.serializer.Schedule(func(context.Context) { + b.serializer.TrySchedule(func(context.Context) { // Resource not found error is reported by the resolver when the // top-level cluster resource is removed by the management server. if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { @@ -351,7 +351,7 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } -// Closes all registered cluster wathers and removes them from the internal map. +// Closes all registered cluster watchers and removes them from the internal map. // // Only executed in the context of a serializer callback. func (b *cdsBalancer) closeAllWatchers() { @@ -364,7 +364,7 @@ func (b *cdsBalancer) closeAllWatchers() { // Close cancels the CDS watch, closes the child policy and closes the // cdsBalancer. 
func (b *cdsBalancer) Close() { - b.serializer.Schedule(func(ctx context.Context) { + b.serializer.TrySchedule(func(context.Context) { b.closeAllWatchers() if b.childLB != nil { @@ -384,7 +384,7 @@ func (b *cdsBalancer) Close() { } func (b *cdsBalancer) ExitIdle() { - b.serializer.Schedule(func(context.Context) { + b.serializer.TrySchedule(func(context.Context) { if b.childLB == nil { b.logger.Warningf("Received ExitIdle with no child policy") return diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go index 0b0d168376d74..835461d0997bc 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go @@ -32,22 +32,19 @@ type clusterWatcher struct { parent *cdsBalancer } -func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData) { - cw.parent.serializer.Schedule(func(context.Context) { - cw.parent.onClusterUpdate(cw.name, u.Resource) - }) +func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { cw.parent.onClusterUpdate(cw.name, u.Resource); onDone() } + cw.parent.serializer.ScheduleOr(handleUpdate, onDone) } -func (cw *clusterWatcher) OnError(err error) { - cw.parent.serializer.Schedule(func(context.Context) { - cw.parent.onClusterError(cw.name, err) - }) +func (cw *clusterWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { cw.parent.onClusterError(cw.name, err); onDone() } + cw.parent.serializer.ScheduleOr(handleError, onDone) } -func (cw *clusterWatcher) OnResourceDoesNotExist() { - cw.parent.serializer.Schedule(func(context.Context) { - cw.parent.onClusterResourceNotFound(cw.name) - }) +func (cw *clusterWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { cw.parent.onClusterResourceNotFound(cw.name); onDone() } + cw.parent.serializer.ScheduleOr(handleNotFound, onDone) } // watcherState groups the state associated with a clusterWatcher. 
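
The serializer changes above replace Schedule with TrySchedule/ScheduleOr: best-effort callbacks go through TrySchedule, while callers that need the callback's outcome (such as UpdateClientConnState) pair ScheduleOr with a buffered error channel and an onFailure path for the closed-balancer case. The sketch below is a simplified, self-contained illustration of that shape; it is not the real grpcsync.CallbackSerializer, and the toySerializer type and its method names are stand-ins invented for this example.

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// toySerializer is a stand-in for a callback serializer: callbacks run one at
// a time, and once the serializer is closed no further callbacks are accepted.
// (The real grpcsync.CallbackSerializer runs callbacks on its own goroutine;
// this sketch runs them inline under a lock, which is enough to show the API
// shape used by the balancer code above.)
type toySerializer struct {
	mu     sync.Mutex
	closed bool
}

// scheduleOr runs cb if the serializer is still open, otherwise runs onFailure.
func (s *toySerializer) scheduleOr(cb func(context.Context), onFailure func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.closed {
		onFailure()
		return
	}
	cb(context.Background())
}

// trySchedule runs cb on a best-effort basis and silently drops it when closed.
func (s *toySerializer) trySchedule(cb func(context.Context)) {
	s.scheduleOr(cb, func() {})
}

func (s *toySerializer) close() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.closed = true
}

var errBalancerClosed = errors.New("balancer is closed")

// updateClientConnState mirrors the blocking pattern in the diff: the caller
// schedules a callback and waits on a buffered channel for its result, with
// the onFailure path reporting errBalancerClosed after Close.
func updateClientConnState(s *toySerializer, cfg string) error {
	errCh := make(chan error, 1)
	s.scheduleOr(func(context.Context) {
		// Apply the configuration update here.
		fmt.Println("applying config:", cfg)
		errCh <- nil
	}, func() { errCh <- errBalancerClosed })
	return <-errCh
}

func main() {
	s := &toySerializer{}
	fmt.Println(updateClientConnState(s, "cluster-a")) // <nil>
	s.close()
	fmt.Println(updateClientConnState(s, "cluster-b")) // balancer is closed
}
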
diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go index 164f3099d2805..0dc71dfedebc5 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/clusterimpl.go @@ -24,6 +24,7 @@ package clusterimpl import ( + "context" "encoding/json" "fmt" "sync" @@ -31,8 +32,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -52,6 +53,11 @@ const ( defaultRequestCountMax = 1024 ) +var ( + connectedAddress = internal.ConnectedAddress.(func(balancer.SubConnState) resolver.Address) + errBalancerClosed = fmt.Errorf("%s LB policy is closed", Name) +) + func init() { balancer.Register(bb{}) } @@ -59,18 +65,17 @@ func init() { type bb struct{} func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { + ctx, cancel := context.WithCancel(context.Background()) b := &clusterImplBalancer{ - ClientConn: cc, - bOpts: bOpts, - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - loadWrapper: loadstore.NewWrapper(), - pickerUpdateCh: buffer.NewUnbounded(), - requestCountMax: defaultRequestCountMax, + ClientConn: cc, + bOpts: bOpts, + loadWrapper: loadstore.NewWrapper(), + requestCountMax: defaultRequestCountMax, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } b.logger = prefixLogger(b) b.child = gracefulswitch.NewBalancer(b, bOpts) - go b.run() b.logger.Infof("Created") return b } @@ -86,18 +91,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err type clusterImplBalancer struct { balancer.ClientConn - // mu guarantees mutual exclusion between Close() and handling of picker - // update to the parent ClientConn in run(). It's to make sure that the - // run() goroutine doesn't send picker update to parent after the balancer - // is closed. - // - // It's only used by the run() goroutine, but not the other exported - // functions. Because the exported functions are guaranteed to be - // synchronized with Close(). - mu sync.Mutex - closed *grpcsync.Event - done *grpcsync.Event - bOpts balancer.BuildOptions logger *grpclog.PrefixLogger xdsClient xdsclient.XDSClient @@ -112,10 +105,11 @@ type clusterImplBalancer struct { clusterNameMu sync.Mutex clusterName string + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + // childState/drops/requestCounter keeps the state used by the most recently - // generated picker. All fields can only be accessed in run(). And run() is - // the only goroutine that sends picker to the parent ClientConn. All - // requests to update picker need to be sent to pickerUpdateCh. + // generated picker. childState balancer.State dropCategories []DropConfig // The categories for drops. 
drops []*dropper @@ -124,7 +118,6 @@ type clusterImplBalancer struct { requestCounter *xdsclient.ClusterRequestsCounter requestCountMax uint32 telemetryLabels map[string]string - pickerUpdateCh *buffer.Unbounded } // updateLoadStore checks the config for load store, and decides whether it @@ -205,14 +198,9 @@ func (b *clusterImplBalancer) updateLoadStore(newConfig *LBConfig) error { return nil } -func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { - if b.closed.HasFired() { - b.logger.Warningf("xds: received ClientConnState {%+v} after clusterImplBalancer was closed", s) - return nil - } - +func (b *clusterImplBalancer) updateClientConnState(s balancer.ClientConnState) error { if b.logger.V(2) { - b.logger.Infof("Received update from resolver, balancer config: %s", pretty.ToJSON(s.BalancerConfig)) + b.logger.Infof("Received configuration: %s", pretty.ToJSON(s.BalancerConfig)) } newConfig, ok := s.BalancerConfig.(*LBConfig) if !ok { @@ -224,7 +212,7 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) // it. bb := balancer.Get(newConfig.ChildPolicy.Name) if bb == nil { - return fmt.Errorf("balancer %q not registered", newConfig.ChildPolicy.Name) + return fmt.Errorf("child policy %q not registered", newConfig.ChildPolicy.Name) } if b.xdsClient == nil { @@ -250,9 +238,14 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) } b.config = newConfig - // Notify run() of this new config, in case drop and request counter need - // update (which means a new picker needs to be generated). - b.pickerUpdateCh.Put(newConfig) + b.telemetryLabels = newConfig.TelemetryLabels + dc := b.handleDropAndRequestCount(newConfig) + if dc != nil && b.childState.Picker != nil { + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(dc), + }) + } // Addresses and sub-balancer config are sent to sub-balancer. return b.child.UpdateClientConnState(balancer.ClientConnState{ @@ -261,20 +254,28 @@ func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) }) } -func (b *clusterImplBalancer) ResolverError(err error) { - if b.closed.HasFired() { - b.logger.Warningf("xds: received resolver error {%+v} after clusterImplBalancer was closed", err) - return +func (b *clusterImplBalancer) UpdateClientConnState(s balancer.ClientConnState) error { + // Handle the update in a blocking fashion. + errCh := make(chan error, 1) + callback := func(context.Context) { + errCh <- b.updateClientConnState(s) + } + onFailure := func() { + // An attempt to schedule callback fails only when an update is received + // after Close(). + errCh <- errBalancerClosed } - b.child.ResolverError(err) + b.serializer.ScheduleOr(callback, onFailure) + return <-errCh } -func (b *clusterImplBalancer) updateSubConnState(sc balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { - if b.closed.HasFired() { - b.logger.Warningf("xds: received subconn state change {%+v, %+v} after clusterImplBalancer was closed", sc, s) - return - } +func (b *clusterImplBalancer) ResolverError(err error) { + b.serializer.TrySchedule(func(context.Context) { + b.child.ResolverError(err) + }) +} +func (b *clusterImplBalancer) updateSubConnState(_ balancer.SubConn, s balancer.SubConnState, cb func(balancer.SubConnState)) { // Trigger re-resolution when a SubConn turns transient failure. This is // necessary for the LogicalDNS in cluster_resolver policy to re-resolve. 
// @@ -296,26 +297,40 @@ func (b *clusterImplBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer } func (b *clusterImplBalancer) Close() { - b.mu.Lock() - b.closed.Fire() - b.mu.Unlock() - - b.child.Close() - b.childState = balancer.State{} - b.pickerUpdateCh.Close() - <-b.done.Done() - b.logger.Infof("Shutdown") + b.serializer.TrySchedule(func(_ context.Context) { + b.child.Close() + b.childState = balancer.State{} + + if b.cancelLoadReport != nil { + b.cancelLoadReport() + b.cancelLoadReport = nil + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() } func (b *clusterImplBalancer) ExitIdle() { - b.child.ExitIdle() + b.serializer.TrySchedule(func(context.Context) { + b.child.ExitIdle() + }) } // Override methods to accept updates from the child LB. func (b *clusterImplBalancer) UpdateState(state balancer.State) { - // Instead of updating parent ClientConn inline, send state to run(). - b.pickerUpdateCh.Put(state) + b.serializer.TrySchedule(func(context.Context) { + b.childState = state + b.ClientConn.UpdateState(balancer.State{ + ConnectivityState: b.childState.ConnectivityState, + Picker: b.newPicker(&dropConfigs{ + drops: b.drops, + requestCounter: b.requestCounter, + requestCountMax: b.requestCountMax, + }), + }) + }) } func (b *clusterImplBalancer) setClusterName(n string) { @@ -360,22 +375,37 @@ func (scw *scWrapper) localityID() xdsinternal.LocalityID { func (b *clusterImplBalancer) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { clusterName := b.getClusterName() newAddrs := make([]resolver.Address, len(addrs)) - var lID xdsinternal.LocalityID for i, addr := range addrs { newAddrs[i] = xds.SetXDSHandshakeClusterName(addr, clusterName) - lID = xdsinternal.GetLocalityID(newAddrs[i]) } var sc balancer.SubConn + scw := &scWrapper{} oldListener := opts.StateListener - opts.StateListener = func(state balancer.SubConnState) { b.updateSubConnState(sc, state, oldListener) } + opts.StateListener = func(state balancer.SubConnState) { + b.serializer.TrySchedule(func(context.Context) { + b.updateSubConnState(sc, state, oldListener) + if state.ConnectivityState != connectivity.Ready { + return + } + // Read connected address and call updateLocalityID() based on the connected + // address's locality. https://github.com/grpc/grpc-go/issues/7339 + addr := connectedAddress(state) + lID := xdsinternal.GetLocalityID(addr) + if lID.Empty() { + if b.logger.V(2) { + b.logger.Infof("Locality ID for %s unexpectedly empty", addr) + } + return + } + scw.updateLocalityID(lID) + }) + } sc, err := b.ClientConn.NewSubConn(newAddrs, opts) if err != nil { return nil, err } - // Wrap this SubConn in a wrapper, and add it to the map. 
- ret := &scWrapper{SubConn: sc} - ret.updateLocalityID(lID) - return ret, nil + scw.SubConn = sc + return scw, nil } func (b *clusterImplBalancer) RemoveSubConn(sc balancer.SubConn) { @@ -448,49 +478,3 @@ func (b *clusterImplBalancer) handleDropAndRequestCount(newConfig *LBConfig) *dr requestCountMax: b.requestCountMax, } } - -func (b *clusterImplBalancer) run() { - defer b.done.Fire() - for { - select { - case update, ok := <-b.pickerUpdateCh.Get(): - if !ok { - return - } - b.pickerUpdateCh.Load() - b.mu.Lock() - if b.closed.HasFired() { - b.mu.Unlock() - return - } - switch u := update.(type) { - case balancer.State: - b.childState = u - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(&dropConfigs{ - drops: b.drops, - requestCounter: b.requestCounter, - requestCountMax: b.requestCountMax, - }), - }) - case *LBConfig: - b.telemetryLabels = u.TelemetryLabels - dc := b.handleDropAndRequestCount(u) - if dc != nil && b.childState.Picker != nil { - b.ClientConn.UpdateState(balancer.State{ - ConnectivityState: b.childState.ConnectivityState, - Picker: b.newPicker(dc), - }) - } - } - b.mu.Unlock() - case <-b.closed.Done(): - if b.cancelLoadReport != nil { - b.cancelLoadReport() - b.cancelLoadReport = nil - } - return - } - } -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go index d8cb8df1a81c7..fbadbb92ba39c 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterimpl/picker.go @@ -19,6 +19,8 @@ package clusterimpl import ( + "context" + v3orcapb "github.com/cncf/xds/go/xds/data/orca/v3" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" @@ -96,14 +98,23 @@ func (b *clusterImplBalancer) newPicker(config *dropConfigs) *picker { } } +func telemetryLabels(ctx context.Context) map[string]string { + if ctx == nil { + return nil + } + labels := stats.GetLabels(ctx) + if labels == nil { + return nil + } + return labels.TelemetryLabels +} + func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { // Unconditionally set labels if present, even dropped or queued RPC's can // use these labels. 
- if info.Ctx != nil { - if labels := stats.GetLabels(info.Ctx); labels != nil && labels.TelemetryLabels != nil { - for key, value := range d.telemetryLabels { - labels.TelemetryLabels[key] = value - } + if labels := telemetryLabels(info.Ctx); labels != nil { + for key, value := range d.telemetryLabels { + labels[key] = value } } @@ -156,6 +167,10 @@ func (d *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { return pr, err } + if labels := telemetryLabels(info.Ctx); labels != nil { + labels["grpc.lb.locality"] = lIDStr + } + if d.loadStore != nil { d.loadStore.CallStarted(lIDStr) oldDone := pr.Done diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go index 83ead92a4a697..749945059b88d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go @@ -207,11 +207,6 @@ func (b *clusterResolverBalancer) handleClientConnUpdate(update *ccUpdate) { // handleResourceUpdate handles a resource update or error from the resource // resolver by propagating the same to the child LB policy. func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { - if err := update.err; err != nil { - b.handleErrorFromUpdate(err, false) - return - } - b.watchUpdateReceived = true b.priorities = update.priorities @@ -219,6 +214,10 @@ func (b *clusterResolverBalancer) handleResourceUpdate(update *resourceUpdate) { // for all configured discovery mechanisms ordered by priority. This is used // to generate configuration for the priority LB policy. b.updateChildConfig() + + if update.onDone != nil { + update.onDone() + } } // updateChildConfig builds child policy configuration using endpoint addresses diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index 151c54dae6d09..5bc64b86305c6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -30,8 +30,14 @@ import ( // resourceUpdate is a combined update from all the resources, in the order of // priority. For example, it can be {EDS, EDS, DNS}. type resourceUpdate struct { + // A discovery mechanism would return an empty update when it runs into + // errors, and this would result in the priority LB policy reporting + // TRANSIENT_FAILURE (if there was a single discovery mechanism), or would + // fallback to the next highest priority that is available. priorities []priorityConfig - err error + // To be invoked once the update is completely processed, or is dropped in + // favor of a newer update. + onDone xdsresource.OnDoneFunc } // topLevelResolver is used by concrete endpointsResolver implementations for @@ -39,7 +45,11 @@ type resourceUpdate struct { // interface and takes appropriate actions upon receipt of updates and errors // from underlying concrete resolvers. type topLevelResolver interface { - onUpdate() + // onUpdate is called when a new update is received from the underlying + // endpointsResolver implementation. The onDone callback is to be invoked + // once the update is completely processed, or is dropped in favor of a + // newer update. 
+ onUpdate(onDone xdsresource.OnDoneFunc) } // endpointsResolver wraps the functionality to resolve a given resource name to @@ -205,7 +215,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { } // Regenerate even if there's no change in discovery mechanism, in case // priority order changed. - rr.generateLocked() + rr.generateLocked(func() {}) } // resolveNow is typically called to trigger re-resolve of DNS. The EDS @@ -252,7 +262,10 @@ func (rr *resourceResolver) stop(closing bool) { // after they are stopped. Therefore, we don't have to worry about another // write to this channel happening at the same time as this one. select { - case <-rr.updateChannel: + case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } default: } rr.updateChannel <- &resourceUpdate{} @@ -262,14 +275,20 @@ func (rr *resourceResolver) stop(closing bool) { // result on the update channel if all child resolvers have received at least // one update. Otherwise it returns early. // -// caller must hold rr.mu. -func (rr *resourceResolver) generateLocked() { +// The onDone callback is invoked inline if not all child resolvers have +// received at least one update. If all child resolvers have received at least +// one update, onDone is invoked when the combined update is processed by the +// clusterresolver LB policy. +// +// Caller must hold rr.mu. +func (rr *resourceResolver) generateLocked(onDone xdsresource.OnDoneFunc) { var ret []priorityConfig for _, rDM := range rr.children { u, ok := rDM.r.lastUpdate() if !ok { // Don't send updates to parent until all resolvers have update to // send. + onDone() return } switch uu := u.(type) { @@ -280,16 +299,23 @@ func (rr *resourceResolver) generateLocked() { } } select { - case <-rr.updateChannel: + // A previously unprocessed update is dropped in favor of the new one, and + // the former's onDone callback is invoked to unblock the xDS client's + // receive path. 
+ case ru := <-rr.updateChannel: + if ru.onDone != nil { + ru.onDone() + } default: } - rr.updateChannel <- &resourceUpdate{priorities: ret} + rr.updateChannel <- &resourceUpdate{priorities: ret, onDone: onDone} } -func (rr *resourceResolver) onUpdate() { - rr.serializer.Schedule(func(context.Context) { +func (rr *resourceResolver) onUpdate(onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { rr.mu.Lock() - rr.generateLocked() + rr.generateLocked(onDone) rr.mu.Unlock() - }) + } + rr.serializer.ScheduleOr(handleUpdate, func() { onDone() }) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go index efdc3088a395c..cfc871d3b59d6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_dns.go @@ -79,7 +79,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr ret.logger.Infof("Failed to parse dns hostname %q in clusterresolver LB policy", target) } ret.updateReceived = true - ret.topLevelResolver.onUpdate() + ret.topLevelResolver.onUpdate(func() {}) return ret } @@ -89,7 +89,7 @@ func newDNSResolver(target string, topLevelResolver topLevelResolver, logger *gr ret.logger.Infof("Failed to build DNS resolver for target %q: %v", target, err) } ret.updateReceived = true - ret.topLevelResolver.onUpdate() + ret.topLevelResolver.onUpdate(func() {}) return ret } ret.dnsR = r @@ -153,7 +153,7 @@ func (dr *dnsDiscoveryMechanism) UpdateState(state resolver.State) error { dr.updateReceived = true dr.mu.Unlock() - dr.topLevelResolver.onUpdate() + dr.topLevelResolver.onUpdate(func() {}) return nil } @@ -176,7 +176,7 @@ func (dr *dnsDiscoveryMechanism) ReportError(err error) { dr.updateReceived = true dr.mu.Unlock() - dr.topLevelResolver.onUpdate() + dr.topLevelResolver.onUpdate(func() {}) } func (dr *dnsDiscoveryMechanism) NewAddress(addresses []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go index 3d0ec356e93a9..ddb949019ee5b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver_eds.go @@ -76,8 +76,9 @@ func newEDSResolver(nameToWatch string, producer xdsresource.Producer, topLevelR } // OnUpdate is invoked to report an update for the resource being watched. -func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData) { +func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceData, onDone xdsresource.OnDoneFunc) { if er.stopped.HasFired() { + onDone() return } @@ -85,11 +86,12 @@ func (er *edsDiscoveryMechanism) OnUpdate(update *xdsresource.EndpointsResourceD er.update = &update.Resource er.mu.Unlock() - er.topLevelResolver.onUpdate() + er.topLevelResolver.onUpdate(onDone) } -func (er *edsDiscoveryMechanism) OnError(err error) { +func (er *edsDiscoveryMechanism) OnError(err error, onDone xdsresource.OnDoneFunc) { if er.stopped.HasFired() { + onDone() return } @@ -102,6 +104,7 @@ func (er *edsDiscoveryMechanism) OnError(err error) { // Continue using a previously received good configuration if one // exists. 
er.mu.Unlock() + onDone() return } @@ -114,11 +117,12 @@ func (er *edsDiscoveryMechanism) OnError(err error) { er.update = &xdsresource.EndpointsUpdate{} er.mu.Unlock() - er.topLevelResolver.onUpdate() + er.topLevelResolver.onUpdate(onDone) } -func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { +func (er *edsDiscoveryMechanism) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { if er.stopped.HasFired() { + onDone() return } @@ -136,5 +140,5 @@ func (er *edsDiscoveryMechanism) OnResourceDoesNotExist() { er.update = &xdsresource.EndpointsUpdate{} er.mu.Unlock() - er.topLevelResolver.onUpdate() + er.topLevelResolver.onUpdate(onDone) } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go index 8ce958d71ca86..f5605df83276b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/loadstore/load_store_wrapper.go @@ -36,7 +36,7 @@ func NewWrapper() *Wrapper { // update its internal perCluster store so that new stats will be added to the // correct perCluster. // -// Note that this struct is a temporary walkaround before we implement graceful +// Note that this struct is a temporary workaround before we implement graceful // switch for EDS. Any update to the clusterName and serviceName is too early, // the perfect timing is when the picker is updated with the new connection. // This early update could cause picks for the old SubConn being reported to the diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go index 80d3d444697ed..53ba72c0813f3 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/outlierdetection/balancer.go @@ -592,20 +592,6 @@ func (b *outlierDetectionBalancer) Target() string { return b.cc.Target() } -func max(x, y time.Duration) time.Duration { - if x < y { - return y - } - return x -} - -func min(x, y time.Duration) time.Duration { - if x < y { - return x - } - return y -} - // handleSubConnUpdate stores the recent state and forward the update // if the SubConn is not ejected. func (b *outlierDetectionBalancer) handleSubConnUpdate(u *scUpdate) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go index 988ca280789ed..c17c62f23a597 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go @@ -270,6 +270,7 @@ func (b *priorityBalancer) run() { // deadlock. b.mu.Lock() if b.done.HasFired() { + b.mu.Unlock() return } switch s := u.(type) { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go index 4655bf418474e..0be807c134a16 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer_priority.go @@ -83,7 +83,9 @@ var ( // Caller must hold b.mu. 
func (b *priorityBalancer) syncPriority(childUpdating string) { if b.inhibitPickerUpdates { - b.logger.Debugf("Skipping update from child policy %q", childUpdating) + if b.logger.V(2) { + b.logger.Infof("Skipping update from child policy %q", childUpdating) + } return } for p, name := range b.priorities { @@ -99,12 +101,16 @@ func (b *priorityBalancer) syncPriority(childUpdating string) { (child.state.ConnectivityState == connectivity.Connecting && child.initTimer != nil) || p == len(b.priorities)-1 { if b.childInUse != child.name || child.name == childUpdating { - b.logger.Debugf("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + if b.logger.V(2) { + b.logger.Infof("childInUse, childUpdating: %q, %q", b.childInUse, child.name) + } // If we switch children or the child in use just updated its // picker, push the child's picker to the parent. b.cc.UpdateState(child.state) } - b.logger.Debugf("Switching to (%q, %v) in syncPriority", child.name, p) + if b.logger.V(2) { + b.logger.Infof("Switching to (%q, %v) in syncPriority", child.name, p) + } b.switchToChild(child, p) break } diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go index b450716fa0f05..5ce72caded482 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/picker.go @@ -159,28 +159,3 @@ func nextSkippingDuplicates(ring *ring, entry *ringEntry) *ringEntry { // There's no qualifying next entry. return nil } - -// nextSkippingDuplicatesSubConn finds the next subconn in the ring, that's -// different from the given subconn. -func nextSkippingDuplicatesSubConn(ring *ring, sc *subConn) *subConn { - var entry *ringEntry - for _, it := range ring.items { - if it.sc == sc { - entry = it - break - } - } - if entry == nil { - // If the given subconn is not in the ring (e.g. it was deleted), return - // the first one. - if len(ring.items) > 0 { - return ring.items[0].sc - } - return nil - } - ee := nextSkippingDuplicates(ring, entry) - if ee == nil { - return nil - } - return ee.sc -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go index eac89b5b4d05a..45dbb2d2a83f8 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ring.go @@ -67,11 +67,15 @@ type ringEntry struct { // // Must be called with a non-empty subConns map. func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, logger *grpclog.PrefixLogger) *ring { - logger.Debugf("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + if logger.V(2) { + logger.Infof("newRing: number of subConns is %d, minRingSize is %d, maxRingSize is %d", subConns.Len(), minRingSize, maxRingSize) + } // https://github.com/envoyproxy/envoy/blob/765c970f06a4c962961a0e03a467e165b276d50f/source/common/upstream/ring_hash_lb.cc#L114 normalizedWeights, minWeight := normalizeWeights(subConns) - logger.Debugf("newRing: normalized subConn weights is %v", normalizedWeights) + if logger.V(2) { + logger.Infof("newRing: normalized subConn weights is %v", normalizedWeights) + } // Normalized weights for {3,3,4} is {0.3,0.3,0.4}. 
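
The comment at the end of the hunk above gives the weight-normalization example {3,3,4} -> {0.3,0.3,0.4}; the normalized weights and their minimum then feed the ring-size calculation in the next hunk. Below is a small self-contained sketch of that normalization step, assuming a plain slice of weights rather than the package's resolver.AddressMap; it is not the ringhash package's actual normalizeWeights, and the function name is a stand-in.

package main

import "fmt"

// normalizeWeightsSketch scales a non-empty list of endpoint weights so they
// sum to 1 and reports the smallest normalized weight (the quantity the
// ring-building code uses when choosing a ring size). Hypothetical helper for
// illustration only.
func normalizeWeightsSketch(weights []uint32) (normalized []float64, minWeight float64) {
	var sum uint32
	for _, w := range weights {
		sum += w
	}
	normalized = make([]float64, len(weights))
	minWeight = 1.0
	for i, w := range weights {
		normalized[i] = float64(w) / float64(sum)
		if normalized[i] < minWeight {
			minWeight = normalized[i]
		}
	}
	return normalized, minWeight
}

func main() {
	n, minW := normalizeWeightsSketch([]uint32{3, 3, 4})
	fmt.Println(n, minW) // [0.3 0.3 0.4] 0.3
}
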
@@ -82,7 +86,9 @@ func newRing(subConns *resolver.AddressMap, minRingSize, maxRingSize uint64, log scale := math.Min(math.Ceil(minWeight*float64(minRingSize))/minWeight, float64(maxRingSize)) ringSize := math.Ceil(scale) items := make([]*ringEntry, 0, int(ringSize)) - logger.Debugf("newRing: creating new ring of size %v", ringSize) + if logger.V(2) { + logger.Infof("newRing: creating new ring of size %v", ringSize) + } // For each entry, scale*weight nodes are generated in the ring. // diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go index e63c6f653904a..ef054d48aa4e7 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/ringhash/ringhash.go @@ -44,12 +44,13 @@ func init() { type bb struct{} -func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Balancer { +func (bb) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer { b := &ringhashBalancer{ - cc: cc, - subConns: resolver.NewAddressMap(), - scStates: make(map[balancer.SubConn]*subConn), - csEvltr: &connectivityStateEvaluator{}, + cc: cc, + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]*subConn), + csEvltr: &connectivityStateEvaluator{}, + orderedSubConns: make([]*subConn, 0), } b.logger = prefixLogger(b) b.logger.Infof("Created") @@ -197,6 +198,14 @@ type ringhashBalancer struct { resolverErr error // the last error reported by the resolver; cleared on successful resolution connErr error // the last connection error; cleared upon leaving TransientFailure + + // orderedSubConns contains the list of subconns in the order that addresses + // appear from the resolver. Together with lastInternallyTriggeredSCIndex, + // this allows triggering connection attempts to all SubConns independently + // of the order they appear on the ring. Always in sync with ring and + // subConns. The index is reset when addresses change. + orderedSubConns []*subConn + lastInternallyTriggeredSCIndex int } // updateAddresses creates new SubConns and removes SubConns, based on the @@ -214,6 +223,9 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { var addrsUpdated bool // addrsSet is the set converted from addrs, used for quick lookup. addrsSet := resolver.NewAddressMap() + + b.orderedSubConns = b.orderedSubConns[:0] // reuse the underlying array. + for _, addr := range addrs { addrsSet.Set(addr, true) newWeight := getWeightAttribute(addr) @@ -234,6 +246,7 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { b.state = b.csEvltr.recordTransition(connectivity.Shutdown, connectivity.Idle) b.subConns.Set(addr, scs) b.scStates[sc] = scs + b.orderedSubConns = append(b.orderedSubConns, scs) addrsUpdated = true } else { // We have seen this address before and created a subConn for it. If the @@ -244,6 +257,7 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { // since *only* the weight attribute has changed, and that does not affect // subConn uniqueness. scInfo := val.(*subConn) + b.orderedSubConns = append(b.orderedSubConns, scInfo) if oldWeight := scInfo.weight; oldWeight != newWeight { scInfo.weight = newWeight b.subConns.Set(addr, scInfo) @@ -264,6 +278,9 @@ func (b *ringhashBalancer) updateAddresses(addrs []resolver.Address) bool { // The entry will be deleted in updateSubConnState. 
} } + if addrsUpdated { + b.lastInternallyTriggeredSCIndex = 0 + } return addrsUpdated } @@ -399,19 +416,11 @@ func (b *ringhashBalancer) updateSubConnState(sc balancer.SubConn, state balance return } } - // Trigger a SubConn (this updated SubConn's next SubConn in the ring) - // to connect if nobody is attempting to connect. - sc := nextSkippingDuplicatesSubConn(b.ring, scs) - if sc != nil { - sc.queueConnect() - return - } - // This handles the edge case where we have a single subConn in the - // ring. nextSkippingDuplicatesSubCon() would have returned nil. We - // still need to ensure that some subConn is attempting to connect, in - // order to give the LB policy a chance to move out of - // TRANSIENT_FAILURE. Hence, we try connecting on the current subConn. - scs.queueConnect() + + // Trigger a SubConn (the next in the order addresses appear in the + // resolver) to connect if nobody is attempting to connect. + b.lastInternallyTriggeredSCIndex = (b.lastInternallyTriggeredSCIndex + 1) % len(b.orderedSubConns) + b.orderedSubConns[b.lastInternallyTriggeredSCIndex].queueConnect() } } diff --git a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go index 74abfec1fa88e..89837605c1d19 100644 --- a/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go +++ b/vendor/google.golang.org/grpc/xds/internal/clusterspecifier/rls/rls.go @@ -65,13 +65,13 @@ func (rls) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.Bala if cfg == nil { return nil, fmt.Errorf("rls_csp: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("rls_csp: error parsing config %v: unknown type %T", cfg, cfg) } rlcs := new(rlspb.RouteLookupClusterSpecifier) - if err := any.UnmarshalTo(rlcs); err != nil { + if err := m.UnmarshalTo(rlcs); err != nil { return nil, fmt.Errorf("rls_csp: error parsing config %v: %v", cfg, err) } rlcJSON, err := protojson.Marshal(rlcs.GetRouteLookupConfig()) diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go index 90155d80be325..5a82490598a34 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/fault/fault.go @@ -81,12 +81,12 @@ func parseConfig(cfg proto.Message) (httpfilter.FilterConfig, error) { if cfg == nil { return nil, fmt.Errorf("fault: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("fault: error parsing config %v: unknown type %T", cfg, cfg) } msg := new(fpb.HTTPFault) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("fault: error parsing config %v: %v", cfg, err) } return config{config: msg}, nil @@ -139,7 +139,7 @@ type interceptor struct { var activeFaults uint32 // global active faults; accessed atomically -func (i *interceptor) NewStream(ctx context.Context, ri iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { +func (i *interceptor) NewStream(ctx context.Context, _ iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { if maxAF := i.config.GetMaxActiveFaults(); maxAF != nil 
{ defer atomic.AddUint32(&activeFaults, ^uint32(0)) // decrement counter if af := atomic.AddUint32(&activeFaults, 1); af > maxAF.GetValue() { @@ -296,5 +296,5 @@ func (*okStream) Header() (metadata.MD, error) { return nil, nil } func (*okStream) Trailer() metadata.MD { return nil } func (*okStream) CloseSend() error { return nil } func (o *okStream) Context() context.Context { return o.ctx } -func (*okStream) SendMsg(m any) error { return io.EOF } -func (*okStream) RecvMsg(m any) error { return io.EOF } +func (*okStream) SendMsg(any) error { return io.EOF } +func (*okStream) RecvMsg(any) error { return io.EOF } diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go index 37de3a39b64f6..bcda2ab05fc8d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go @@ -117,7 +117,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." - A41 if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG { return config{}, nil } @@ -128,7 +128,7 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) { ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()}, "") if err != nil { // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." - A41 if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG { return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err) } @@ -141,12 +141,12 @@ func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, er if cfg == nil { return nil, fmt.Errorf("rbac: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("rbac: error parsing config %v: unknown type %T", cfg, cfg) } msg := new(rpb.RBAC) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("rbac: error parsing config %v: %v", cfg, err) } return parseConfig(msg) @@ -156,12 +156,12 @@ func (builder) ParseFilterConfigOverride(override proto.Message) (httpfilter.Fil if override == nil { return nil, fmt.Errorf("rbac: nil configuration message provided") } - any, ok := override.(*anypb.Any) + m, ok := override.(*anypb.Any) if !ok { return nil, fmt.Errorf("rbac: error parsing override config %v: unknown type %T", override, override) } msg := new(rpb.RBACPerRoute) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("rbac: error parsing override config %v: %v", override, err) } return parseConfig(msg.Rbac) @@ -198,7 +198,7 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http // "If absent, no enforcing RBAC policy will be applied" - RBAC // Documentation for Rules field. // "At this time, if the RBAC.action is Action.LOG then the policy will be - // completely ignored, as if RBAC was not configurated." - A41 + // completely ignored, as if RBAC was not configured." 
- A41 if c.chainEngine == nil { return nil, nil } diff --git a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go index 1675ec86ec11c..a781523d371e1 100644 --- a/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go +++ b/vendor/google.golang.org/grpc/xds/internal/httpfilter/router/router.go @@ -54,12 +54,12 @@ func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, er if cfg == nil { return nil, fmt.Errorf("router: nil configuration message provided") } - any, ok := cfg.(*anypb.Any) + m, ok := cfg.(*anypb.Any) if !ok { return nil, fmt.Errorf("router: error parsing config %v: unknown type %T", cfg, cfg) } msg := new(pb.Router) - if err := any.UnmarshalTo(msg); err != nil { + if err := m.UnmarshalTo(msg); err != nil { return nil, fmt.Errorf("router: error parsing config %v: %v", cfg, err) } return config{}, nil diff --git a/vendor/google.golang.org/grpc/xds/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/internal.go index 7091990500f97..1d8a6b03f1b3b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/internal.go @@ -55,6 +55,11 @@ func (l LocalityID) Equal(o any) bool { return l.Region == ol.Region && l.Zone == ol.Zone && l.SubZone == ol.SubZone } +// Empty returns whether or not the locality ID is empty. +func (l LocalityID) Empty() bool { + return l.Region == "" && l.Zone == "" && l.SubZone == "" +} + // LocalityIDFromString converts a json representation of locality, into a // LocalityID struct. func LocalityIDFromString(s string) (ret LocalityID, _ error) { diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go index f505eeb4394ec..d9c23278281fc 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/internal/internal.go @@ -26,5 +26,5 @@ var ( NewWRR any // func() wrr.WRR // NewXDSClient is the function used to create a new xDS client. - NewXDSClient any // func() (xdsclient.XDSClient, func(), error) + NewXDSClient any // func(string) (xdsclient.XDSClient, func(), error) ) diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index f5bfc500c11a6..36776f3debdfd 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -182,7 +182,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP if v := atomic.AddInt32(ref, -1); v == 0 { // This entry will be removed from activeClusters when // producing the service config for the empty update. - cs.r.serializer.Schedule(func(context.Context) { + cs.r.serializer.TrySchedule(func(context.Context) { cs.r.onClusterRefDownToZero() }) } @@ -326,7 +326,7 @@ func (cs *configSelector) stop() { // selector; we need another update to delete clusters from the config (if // we don't have another update pending already). 
if needUpdate { - cs.r.serializer.Schedule(func(context.Context) { + cs.r.serializer.TrySchedule(func(context.Context) { cs.r.onClusterRefDownToZero() }) } @@ -336,7 +336,7 @@ type interceptorList struct { interceptors []iresolver.ClientInterceptor } -func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, done func(), newStream func(ctx context.Context, done func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { +func (il *interceptorList) NewStream(ctx context.Context, ri iresolver.RPCInfo, _ func(), newStream func(ctx context.Context, _ func()) (iresolver.ClientStream, error)) (iresolver.ClientStream, error) { for i := len(il.interceptors) - 1; i >= 0; i-- { ns := newStream interceptor := il.interceptors[i] diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go index abb3c2c5acf18..0de6604484b1b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go @@ -36,22 +36,19 @@ func newListenerWatcher(resourceName string, parent *xdsResolver) *listenerWatch return lw } -func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { - l.parent.serializer.Schedule(func(context.Context) { - l.parent.onListenerResourceUpdate(update.Resource) - }) +func (l *listenerWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { l.parent.onListenerResourceUpdate(update.Resource); onDone() } + l.parent.serializer.ScheduleOr(handleUpdate, onDone) } -func (l *listenerWatcher) OnError(err error) { - l.parent.serializer.Schedule(func(context.Context) { - l.parent.onListenerResourceError(err) - }) +func (l *listenerWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { l.parent.onListenerResourceError(err); onDone() } + l.parent.serializer.ScheduleOr(handleError, onDone) } -func (l *listenerWatcher) OnResourceDoesNotExist() { - l.parent.serializer.Schedule(func(context.Context) { - l.parent.onListenerResourceNotFound() - }) +func (l *listenerWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { l.parent.onListenerResourceNotFound(); onDone() } + l.parent.serializer.ScheduleOr(handleNotFound, onDone) } func (l *listenerWatcher) stop() { @@ -71,22 +68,22 @@ func newRouteConfigWatcher(resourceName string, parent *xdsResolver) *routeConfi return rw } -func (r *routeConfigWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { - r.parent.serializer.Schedule(func(context.Context) { - r.parent.onRouteConfigResourceUpdate(r.resourceName, update.Resource) - }) +func (r *routeConfigWatcher) OnUpdate(u *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) { + handleUpdate := func(context.Context) { + r.parent.onRouteConfigResourceUpdate(r.resourceName, u.Resource) + onDone() + } + r.parent.serializer.ScheduleOr(handleUpdate, onDone) } -func (r *routeConfigWatcher) OnError(err error) { - r.parent.serializer.Schedule(func(context.Context) { - r.parent.onRouteConfigResourceError(r.resourceName, err) - }) +func (r *routeConfigWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + handleError := func(context.Context) { r.parent.onRouteConfigResourceError(r.resourceName, err); onDone() } + r.parent.serializer.ScheduleOr(handleError, onDone) } -func (r 
*routeConfigWatcher) OnResourceDoesNotExist() { - r.parent.serializer.Schedule(func(context.Context) { - r.parent.onRouteConfigResourceNotFound(r.resourceName) - }) +func (r *routeConfigWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + handleNotFound := func(context.Context) { r.parent.onRouteConfigResourceNotFound(r.resourceName); onDone() } + r.parent.serializer.ScheduleOr(handleNotFound, onDone) } func (r *routeConfigWatcher) stop() { diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go index 40dd972678112..b5d24e4bf214b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go @@ -49,8 +49,8 @@ const Scheme = "xds" // ClientConns at the same time. func newBuilderForTesting(config []byte) (resolver.Builder, error) { return &xdsResolverBuilder{ - newXDSClient: func() (xdsclient.XDSClient, func(), error) { - return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Contents: config}) + newXDSClient: func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Name: name, Contents: config}) }, }, nil } @@ -64,7 +64,7 @@ func init() { } type xdsResolverBuilder struct { - newXDSClient func() (xdsclient.XDSClient, func(), error) + newXDSClient func(string) (xdsclient.XDSClient, func(), error) } // Build helps implement the resolver.Builder interface. @@ -97,16 +97,16 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon r.serializerCancel = cancel // Initialize the xDS client. - newXDSClient := rinternal.NewXDSClient.(func() (xdsclient.XDSClient, func(), error)) + newXDSClient := rinternal.NewXDSClient.(func(string) (xdsclient.XDSClient, func(), error)) if b.newXDSClient != nil { newXDSClient = b.newXDSClient } - client, close, err := newXDSClient() + client, closeFn, err := newXDSClient(target.String()) if err != nil { return nil, fmt.Errorf("xds: failed to create xds-client: %v", err) } r.xdsClient = client - r.xdsClientClose = close + r.xdsClientClose = closeFn // Determine the listener resource name and start a watcher for it. template, err := r.sanityChecksOnBootstrapConfig(target, opts, r.xdsClient) @@ -128,7 +128,7 @@ func (b *xdsResolverBuilder) Build(target resolver.Target, cc resolver.ClientCon // // Returns the listener resource name template to use. If any of the above // validations fail, a non-nil error is returned. -func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, opts resolver.BuildOptions, client xdsclient.XDSClient) (string, error) { +func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, _ resolver.BuildOptions, client xdsclient.XDSClient) (string, error) { bootstrapConfig := client.BootstrapConfig() if bootstrapConfig == nil { // This is never expected to happen after a successful xDS client @@ -139,9 +139,13 @@ func (r *xdsResolver) sanityChecksOnBootstrapConfig(target resolver.Target, opts // Find the client listener template to use from the bootstrap config: // - If authority is not set in the target, use the top level template // - If authority is set, use the template from the authority map. 
- template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate + template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate() if authority := target.URL.Host; authority != "" { - a := bootstrapConfig.Authorities[authority] + authorities := bootstrapConfig.Authorities() + if authorities == nil { + return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) + } + a := authorities[authority] if a == nil { return "", fmt.Errorf("xds: authority %q specified in dial target %q is not found in the bootstrap file", authority, target) } @@ -210,7 +214,7 @@ type xdsResolver struct { } // ResolveNow is a no-op at this point. -func (*xdsResolver) ResolveNow(o resolver.ResolveNowOptions) {} +func (*xdsResolver) ResolveNow(resolver.ResolveNowOptions) {} func (r *xdsResolver) Close() { // Cancel the context passed to the serializer and wait for any scheduled diff --git a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go index fdba769294dec..d2a13d75c5919 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go @@ -47,7 +47,7 @@ type connWrapper struct { // The specific filter chain picked for handling this connection. filterChain *xdsresource.FilterChain - // A reference fo the listenerWrapper on which this connection was accepted. + // A reference to the listenerWrapper on which this connection was accepted. parent *listenerWrapper // The certificate providers created for this connection. @@ -107,7 +107,7 @@ func (c *connWrapper) XDSHandshakeInfo() (*xdsinternal.HandshakeInfo, error) { return xdsinternal.NewHandshakeInfo(nil, nil, nil, false), nil } - cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs + cpc := c.parent.xdsC.BootstrapConfig().CertProviderConfigs() // Identity provider name is mandatory on the server-side, and this is // enforced when the resource is received at the XDSClient layer. secCfg := c.filterChain.SecurityCfg diff --git a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go index 174b54c441174..e2246f41afecc 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go @@ -410,7 +410,8 @@ type ldsWatcher struct { name string } -func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { +func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData, onDone xdsresource.OnDoneFunc) { + defer onDone() if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received update: %#v after listener was closed", lw.name, update) return @@ -421,7 +422,8 @@ func (lw *ldsWatcher) OnUpdate(update *xdsresource.ListenerResourceData) { lw.parent.handleLDSUpdate(update.Resource) } -func (lw *ldsWatcher) OnError(err error) { +func (lw *ldsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + defer onDone() if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received error: %v after listener was closed", lw.name, err) return @@ -433,7 +435,8 @@ func (lw *ldsWatcher) OnError(err error) { // continue to use the old configuration. 
} -func (lw *ldsWatcher) OnResourceDoesNotExist() { +func (lw *ldsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + defer onDone() if lw.parent.closed.HasFired() { lw.logger.Warningf("Resource %q received resource-does-not-exist error after listener was closed", lw.name) return diff --git a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go index 67cde4602894a..bcd3938e6f1ab 100644 --- a/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go +++ b/vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go @@ -147,7 +147,8 @@ type rdsWatcher struct { canceled bool // eats callbacks if true } -func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { +func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData, onDone xdsresource.OnDoneFunc) { + defer onDone() rw.mu.Lock() if rw.canceled { rw.mu.Unlock() @@ -160,7 +161,8 @@ func (rw *rdsWatcher) OnUpdate(update *xdsresource.RouteConfigResourceData) { rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{data: &update.Resource}) } -func (rw *rdsWatcher) OnError(err error) { +func (rw *rdsWatcher) OnError(err error, onDone xdsresource.OnDoneFunc) { + defer onDone() rw.mu.Lock() if rw.canceled { rw.mu.Unlock() @@ -173,7 +175,8 @@ func (rw *rdsWatcher) OnError(err error) { rw.parent.handleRouteUpdate(rw.routeName, rdsWatcherUpdate{err: err}) } -func (rw *rdsWatcher) OnResourceDoesNotExist() { +func (rw *rdsWatcher) OnResourceDoesNotExist(onDone xdsresource.OnDoneFunc) { + defer onDone() rw.mu.Lock() if rw.canceled { rw.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go index b0763a0240318..3251737f181ea 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/authority.go @@ -23,6 +23,7 @@ import ( "fmt" "strings" "sync" + "sync/atomic" "time" "google.golang.org/grpc/internal/grpclog" @@ -118,12 +119,12 @@ func newAuthority(args authorityArgs) (*authority, error) { } tr, err := transport.New(transport.Options{ - ServerCfg: *args.serverCfg, + ServerCfg: args.serverCfg, OnRecvHandler: ret.handleResourceUpdate, OnErrorHandler: ret.newConnectionError, OnSendHandler: ret.transportOnSendHandler, Logger: args.logger, - NodeProto: args.bootstrapCfg.NodeProto, + NodeProto: args.bootstrapCfg.Node(), }) if err != nil { return nil, fmt.Errorf("creating new transport to %q: %v", args.serverCfg, err) @@ -148,7 +149,7 @@ func (a *authority) transportOnSendHandler(u *transport.ResourceSendInfo) { a.startWatchTimersLocked(rType, u.ResourceNames) } -func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate) error { +func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate, onDone func()) error { rType := a.resourceTypeGetter(resourceUpdate.URL) if rType == nil { return xdsresource.NewErrorf(xdsresource.ErrorTypeResourceTypeUnsupported, "Resource URL %v unknown in response from server", resourceUpdate.URL) @@ -159,14 +160,40 @@ func (a *authority) handleResourceUpdate(resourceUpdate transport.ResourceUpdate ServerConfig: a.serverCfg, } updates, md, err := decodeAllResources(opts, rType, resourceUpdate) - a.updateResourceStateAndScheduleCallbacks(rType, updates, md) + a.updateResourceStateAndScheduleCallbacks(rType, updates, md, onDone) return err } -func (a *authority) 
updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata) { +func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Type, updates map[string]resourceDataErrTuple, md xdsresource.UpdateMetadata, onDone func()) { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() + // We build a list of callback funcs to invoke, and invoke them at the end + // of this method instead of inline (when handling the update for a + // particular resource), because we want to make sure that all calls to + // increment watcherCnt happen before any callbacks are invoked. This will + // ensure that the onDone callback is never invoked before all watcher + // callbacks are invoked, and the watchers have processed the update. + watcherCnt := new(atomic.Int64) + done := func() { + watcherCnt.Add(-1) + if watcherCnt.Load() == 0 { + onDone() + } + } + funcsToSchedule := []func(context.Context){} + defer func() { + if len(funcsToSchedule) == 0 { + // When there are no watchers for the resources received as part of + // this update, invoke onDone explicitly to unblock the next read on + // the ADS stream. + onDone() + } + for _, f := range funcsToSchedule { + a.serializer.ScheduleOr(f, onDone) + } + }() + resourceStates := a.resources[rType] for name, uErr := range updates { if state, ok := resourceStates[name]; ok { @@ -210,7 +237,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty for watcher := range state.watchers { watcher := watcher err := uErr.err - a.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnError(err, done) }) } continue } @@ -225,11 +253,14 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty for watcher := range state.watchers { watcher := watcher resource := uErr.resource - a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnUpdate(resource, done) }) } } // Sync cache. - a.logger.Debugf("Resource type %q with name %q added to cache", rType.TypeName(), name) + if a.logger.V(2) { + a.logger.Infof("Resource type %q with name %q added to cache", rType.TypeName(), name) + } state.cache = uErr.resource // Set status to ACK, and clear error state. The metadata might be a // NACK metadata because some other resources in the same response @@ -283,7 +314,7 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty // resource deletion is to be ignored, the resource is not removed from // the cache and the corresponding OnResourceDoesNotExist() callback is // not invoked on the watchers. 
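The authority changes above thread an onDone callback through resource updates: each scheduled watcher callback decrements an atomic counter, only the last one to finish invokes onDone, and onDone is called directly when there are no watchers at all. Below is a standalone sketch of that fan-out/fan-in pattern under assumed, simplified types; it delivers callbacks via plain goroutines rather than the client's serializer.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// notifyWatchers delivers an update to every watcher and invokes onDone once
// all of them have reported completion. If there are no watchers, onDone is
// called immediately so the caller is never left blocked.
func notifyWatchers(watchers []func(update string, done func()), update string, onDone func()) {
	if len(watchers) == 0 {
		onDone()
		return
	}
	remaining := new(atomic.Int64)
	remaining.Add(int64(len(watchers)))
	done := func() {
		if remaining.Add(-1) == 0 {
			onDone()
		}
	}
	for _, w := range watchers {
		go w(update, done)
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	watchers := []func(string, func()){
		func(u string, done func()) { fmt.Println("watcher 1 got", u); done() },
		func(u string, done func()) { fmt.Println("watcher 2 got", u); done() },
	}
	notifyWatchers(watchers, "listener-update", func() {
		fmt.Println("all watchers finished; unblock the next stream read")
		wg.Done()
	})
	wg.Wait()
}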
- if a.serverCfg.IgnoreResourceDeletion { + if a.serverCfg.ServerFeaturesIgnoreResourceDeletion() { if !state.deletionIgnored { state.deletionIgnored = true a.logger.Warningf("Ignoring resource deletion for resource %q of type %q", name, rType.TypeName()) @@ -298,7 +329,8 @@ func (a *authority) updateResourceStateAndScheduleCallbacks(rType xdsresource.Ty state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + watcherCnt.Add(1) + funcsToSchedule = append(funcsToSchedule, func(context.Context) { watcher.OnResourceDoesNotExist(done) }) } } } @@ -426,8 +458,8 @@ func (a *authority) newConnectionError(err error) { // Propagate the connection error from the transport layer to all watchers. for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { - watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err)) + a.serializer.TrySchedule(func(context.Context) { + watcher.OnError(xdsresource.NewErrorf(xdsresource.ErrorTypeConnection, "xds: error received from xDS stream: %v", err), func() {}) }) } } @@ -454,7 +486,9 @@ func (a *authority) close() { } func (a *authority) watchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) func() { - a.logger.Debugf("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + if a.logger.V(2) { + a.logger.Infof("New watch for type %q, resource name %q", rType.TypeName(), resourceName) + } a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -471,7 +505,9 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // instruct the transport layer to send a DiscoveryRequest for the same. state := resources[resourceName] if state == nil { - a.logger.Debugf("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + if a.logger.V(2) { + a.logger.Infof("First watch for type %q, resource name %q", rType.TypeName(), resourceName) + } state = &resourceState{ watchers: make(map[xdsresource.ResourceWatcher]bool), md: xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusRequested}, @@ -489,7 +525,7 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w a.logger.Infof("Resource type %q with resource name %q found in cache: %s", rType.TypeName(), resourceName, state.cache.ToJSON()) } resource := state.cache - a.serializer.Schedule(func(context.Context) { watcher.OnUpdate(resource) }) + a.serializer.TrySchedule(func(context.Context) { watcher.OnUpdate(resource, func() {}) }) } return func() { @@ -510,7 +546,9 @@ func (a *authority) watchResource(rType xdsresource.Type, resourceName string, w // There are no more watchers for this resource, delete the state // associated with it, and instruct the transport to send a request // which does not include this resource name. 
- a.logger.Debugf("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + if a.logger.V(2) { + a.logger.Infof("Removing last watch for type %q, resource name %q", rType.TypeName(), resourceName) + } delete(resources, resourceName) a.sendDiscoveryRequestLocked(rType, resources) } @@ -540,7 +578,7 @@ func (a *authority) handleWatchTimerExpiryLocked(rType xdsresource.Type, resourc state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) } } @@ -566,13 +604,13 @@ func (a *authority) triggerResourceNotFoundForTesting(rType xdsresource.Type, re state.md = xdsresource.UpdateMetadata{Status: xdsresource.ServiceStatusNotExist} for watcher := range state.watchers { watcher := watcher - a.serializer.Schedule(func(context.Context) { watcher.OnResourceDoesNotExist() }) + a.serializer.TrySchedule(func(context.Context) { watcher.OnResourceDoesNotExist(func() {}) }) } } // sendDiscoveryRequestLocked sends a discovery request for the specified // resource type and resource names. Even though this method does not directly -// access the resource cache, it is important that `resourcesMu` be beld when +// access the resource cache, it is important that `resourcesMu` be held when // calling this method to ensure that a consistent snapshot of resource names is // being requested. func (a *authority) sendDiscoveryRequestLocked(rType xdsresource.Type, resources map[string]*resourceState) { @@ -589,7 +627,7 @@ func (a *authority) reportLoad() (*load.Store, func()) { return a.transport.ReportLoad() } -func (a *authority) dumpResources() ([]*v3statuspb.ClientConfig_GenericXdsConfig, error) { +func (a *authority) dumpResources() []*v3statuspb.ClientConfig_GenericXdsConfig { a.resourcesMu.Lock() defer a.resourcesMu.Unlock() @@ -619,7 +657,7 @@ func (a *authority) dumpResources() ([]*v3statuspb.ClientConfig_GenericXdsConfig ret = append(ret, config) } } - return ret, nil + return ret } func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 468c5fb31b9b1..144cb5bd7686f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -24,8 +24,6 @@ import ( "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/xds/internal/xdsclient/load" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" - - v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) // XDSClient is a full fledged gRPC client which queries a set of discovery APIs @@ -48,10 +46,6 @@ type XDSClient interface { // the watcher is canceled. Callers need to handle this case. WatchResource(rType xdsresource.Type, resourceName string, watcher xdsresource.ResourceWatcher) (cancel func()) - // DumpResources returns the status of the xDS resources. Returns a map of - // resource type URLs to a map of resource names to resource state. 
- DumpResources() (*v3statuspb.ClientStatusResponse, error) - ReportLoad(*bootstrap.ServerConfig) (*load.Store, func()) BootstrapConfig() *bootstrap.Config diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go index 8dec8f34b2096..6097e86925e6f 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_new.go @@ -19,9 +19,7 @@ package xdsclient import ( - "bytes" "context" - "encoding/json" "fmt" "sync" "time" @@ -33,44 +31,33 @@ import ( "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" ) -// New returns a new xDS client configured by the bootstrap file specified in env -// variable GRPC_XDS_BOOTSTRAP or GRPC_XDS_BOOTSTRAP_CONFIG. -// -// The returned client is a reference counted singleton instance. This function -// creates a new client only when one doesn't already exist. -// -// The second return value represents a close function which releases the -// caller's reference on the returned client. The caller is expected to invoke -// it once they are done using the client. The underlying client will be closed -// only when all references are released, and it is safe for the caller to -// invoke this close function multiple times. -func New() (XDSClient, func(), error) { - return newRefCountedWithConfig(nil) -} +// NameForServer represents the value to be passed as name when creating an xDS +// client from xDS-enabled gRPC servers. This is a well-known dedicated key +// value, and is defined in gRFC A71. +const NameForServer = "#server" -// NewWithConfig is similar to New, except that it uses the provided bootstrap -// configuration to create the xDS client if and only if the bootstrap -// environment variables are not defined. +// New returns an xDS client configured with bootstrap configuration specified +// by the ordered list: +// - file name containing the configuration specified by GRPC_XDS_BOOTSTRAP +// - actual configuration specified by GRPC_XDS_BOOTSTRAP_CONFIG +// - fallback configuration set using bootstrap.SetFallbackBootstrapConfig // -// The returned client is a reference counted singleton instance. This function -// creates a new client only when one doesn't already exist. +// gRPC client implementations are expected to pass the channel's target URI for +// the name field, while server implementations are expected to pass a dedicated +// well-known value "#server", as specified in gRFC A71. The returned client is +// a reference counted implementation shared among callers using the same name. // // The second return value represents a close function which releases the // caller's reference on the returned client. The caller is expected to invoke // it once they are done using the client. The underlying client will be closed // only when all references are released, and it is safe for the caller to // invoke this close function multiple times. -// -// # Internal Only -// -// This function should ONLY be used by the internal google-c2p resolver. -// DO NOT use this elsewhere. Use New() instead. -func NewWithConfig(config *bootstrap.Config) (XDSClient, func(), error) { - return newRefCountedWithConfig(config) +func New(name string) (XDSClient, func(), error) { + return newRefCounted(name, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) } -// newWithConfig returns a new xdsClient with the given config. 
-func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { +// newClientImpl returns a new xdsClient with the given config. +func newClientImpl(config *bootstrap.Config, watchExpiryTimeout time.Duration, idleAuthorityDeleteTimeout time.Duration) (*clientImpl, error) { ctx, cancel := context.WithCancel(context.Background()) c := &clientImpl{ done: grpcsync.NewEvent(), @@ -84,13 +71,14 @@ func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration, i } c.logger = prefixLogger(c) - c.logger.Infof("Created client to xDS management server: %s", config.XDSServer) return c, nil } // OptionsForTesting contains options to configure xDS client creation for // testing purposes only. type OptionsForTesting struct { + // Name is a unique name for this xDS client. + Name string // Contents contain a JSON representation of the bootstrap configuration to // be used when creating the xDS client. Contents []byte @@ -114,6 +102,9 @@ type OptionsForTesting struct { // // This function should ONLY be used for testing purposes. func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { + if opts.Name == "" { + return nil, nil, fmt.Errorf("opts.Name field must be non-empty") + } if opts.WatchExpiryTimeout == 0 { opts.WatchExpiryTimeout = defaultWatchExpiryTimeout } @@ -121,49 +112,32 @@ func NewForTesting(opts OptionsForTesting) (XDSClient, func(), error) { opts.AuthorityIdleTimeout = defaultIdleAuthorityDeleteTimeout } - // Normalize the input configuration, as this is used as the key in the map - // of xDS clients created for testing. - buf := bytes.Buffer{} - err := json.Indent(&buf, opts.Contents, "", "") - if err != nil { - return nil, nil, fmt.Errorf("xds: error normalizing JSON: %v", err) + if err := bootstrap.SetFallbackBootstrapConfig(opts.Contents); err != nil { + return nil, nil, err } - opts.Contents = bytes.TrimSpace(buf.Bytes()) + client, cancel, err := newRefCounted(opts.Name, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout) + return client, func() { bootstrap.UnsetFallbackBootstrapConfigForTesting(); cancel() }, err +} +// GetForTesting returns an xDS client created earlier using the given name. +// +// The second return value represents a close function which the caller is +// expected to invoke once they are done using the client. It is safe for the +// caller to invoke this close function multiple times. +// +// # Testing Only +// +// This function should ONLY be used for testing purposes. +func GetForTesting(name string) (XDSClient, func(), error) { clientsMu.Lock() defer clientsMu.Unlock() - var client *clientRefCounted - closeFunc := grpcsync.OnceFunc(func() { - clientsMu.Lock() - defer clientsMu.Unlock() - if client.decrRef() == 0 { - client.close() - delete(clients, string(opts.Contents)) - } - }) - - // If an xDS client exists for the given configuration, increment its - // reference count and return it. 
- if c := clients[string(opts.Contents)]; c != nil { - c.incrRef() - client = c - return c, closeFunc, nil - } - - // Create a new xDS client for the given configuration - bcfg, err := bootstrap.NewConfigFromContents(opts.Contents) - if err != nil { - return nil, nil, fmt.Errorf("bootstrap config %s: %v", string(opts.Contents), err) - } - cImpl, err := newWithConfig(bcfg, opts.WatchExpiryTimeout, opts.AuthorityIdleTimeout) - if err != nil { - return nil, nil, fmt.Errorf("creating xDS client: %v", err) + c, ok := clients[name] + if !ok { + return nil, nil, fmt.Errorf("xDS client with name %q not found", name) } - client = &clientRefCounted{clientImpl: cImpl, refCount: 1} - clients[string(opts.Contents)] = client - - return client, closeFunc, nil + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil } func init() { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go new file mode 100644 index 0000000000000..1efb4de42eb2e --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client_refcounted.go @@ -0,0 +1,104 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package xdsclient + +import ( + "fmt" + "sync/atomic" + "time" + + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/xds/bootstrap" +) + +const ( + defaultWatchExpiryTimeout = 15 * time.Second + defaultIdleAuthorityDeleteTimeout = 5 * time.Minute +) + +var ( + // The following functions are no-ops in the actual code, but can be + // overridden in tests to give them visibility into certain events. + xdsClientImplCreateHook = func(string) {} + xdsClientImplCloseHook = func(string) {} +) + +func clientRefCountedClose(name string) { + clientsMu.Lock() + defer clientsMu.Unlock() + + client, ok := clients[name] + if !ok { + logger.Errorf("Attempt to close a non-existent xDS client with name %s", name) + return + } + if client.decrRef() != 0 { + return + } + client.clientImpl.close() + xdsClientImplCloseHook(name) + delete(clients, name) + +} + +// newRefCounted creates a new reference counted xDS client implementation for +// name, if one does not exist already. If an xDS client for the given name +// exists, it gets a reference to it and returns it. +func newRefCounted(name string, watchExpiryTimeout, idleAuthorityTimeout time.Duration) (XDSClient, func(), error) { + clientsMu.Lock() + defer clientsMu.Unlock() + + if c := clients[name]; c != nil { + c.incrRef() + return c, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil + } + + // Create the new client implementation. 
+ config, err := bootstrap.GetConfiguration() + if err != nil { + return nil, nil, fmt.Errorf("xds: failed to get xDS bootstrap config: %v", err) + } + c, err := newClientImpl(config, watchExpiryTimeout, idleAuthorityTimeout) + if err != nil { + return nil, nil, err + } + c.logger.Infof("Created client with name %q and bootstrap configuration:\n %s", name, config) + client := &clientRefCounted{clientImpl: c, refCount: 1} + clients[name] = client + xdsClientImplCreateHook(name) + + logger.Infof("xDS node ID: %s", config.Node().GetId()) + return client, grpcsync.OnceFunc(func() { clientRefCountedClose(name) }), nil +} + +// clientRefCounted is ref-counted, and to be shared by the xds resolver and +// balancer implementations, across multiple ClientConns and Servers. +type clientRefCounted struct { + *clientImpl + + refCount int32 // accessed atomically +} + +func (c *clientRefCounted) incrRef() int32 { + return atomic.AddInt32(&c.refCount, 1) +} + +func (c *clientRefCounted) decrRef() int32 { + return atomic.AddInt32(&c.refCount, -1) +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go index 7321250d6ab26..9f619016a08e1 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl.go @@ -85,17 +85,17 @@ func (c *clientImpl) close() { c.authorityMu.Unlock() c.serializerClose() - for _, f := range c.config.XDSServer.Cleanups { - f() - } - for _, a := range c.config.Authorities { - if a.XDSServer == nil { - // The server for this authority is the top-level one, cleaned up above. - continue - } - for _, f := range a.XDSServer.Cleanups { + for _, s := range c.config.XDSServers() { + for _, f := range s.Cleanups() { f() } } + for _, a := range c.config.Authorities() { + for _, s := range a.XDSServers { + for _, f := range s.Cleanups() { + f() + } + } + } c.logger.Infof("Shutdown") } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go index 69db79ee89137..1ce20fabdf830 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_authority.go @@ -45,14 +45,18 @@ func (c *clientImpl) findAuthority(n *xdsresource.Name) (*authority, func(), err return nil, nil, errors.New("the xds-client is closed") } - config := c.config.XDSServer + config := c.config.XDSServers()[0] if scheme == xdsresource.FederationScheme { - cfg, ok := c.config.Authorities[authority] + authorities := c.config.Authorities() + if authorities == nil { + return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) + } + cfg, ok := authorities[authority] if !ok { return nil, nil, fmt.Errorf("xds: failed to find authority %q", authority) } - if cfg.XDSServer != nil { - config = cfg.XDSServer + if len(cfg.XDSServers) >= 1 { + config = cfg.XDSServers[0] } } @@ -110,7 +114,7 @@ func (c *clientImpl) newAuthorityLocked(config *bootstrap.ServerConfig) (_ *auth serializer: c.serializer, resourceTypeGetter: c.resourceTypes.get, watchExpiryTimeout: c.watchExpiryTimeout, - logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI)), + logger: grpclog.NewPrefixLogger(logger, authorityPrefix(c, config.ServerURI())), }) if err != nil { return nil, fmt.Errorf("creating new authority for config %q: %v", config.String(), err) 
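The new client_refcounted.go above keys shared client instances by name and hands each caller a close function that tears the client down only when the last reference is released. The sketch below reproduces that map-plus-refcount pattern with illustrative names; it is not the xdsclient package API.

package main

import (
	"fmt"
	"sync"
)

type sharedClient struct {
	name     string
	refCount int
}

var (
	mu      sync.Mutex
	clients = map[string]*sharedClient{}
)

// acquire returns the client registered under name, creating it on first use,
// together with a release function. The client is removed from the registry
// only when every caller has invoked its release function, and each release
// function is idempotent (mirroring the OnceFunc wrapping in the diff above).
func acquire(name string) (*sharedClient, func()) {
	mu.Lock()
	defer mu.Unlock()
	c, ok := clients[name]
	if !ok {
		c = &sharedClient{name: name}
		clients[name] = c
	}
	c.refCount++
	var once sync.Once
	release := func() {
		once.Do(func() {
			mu.Lock()
			defer mu.Unlock()
			c.refCount--
			if c.refCount == 0 {
				delete(clients, name)
				fmt.Println("closed client", name)
			}
		})
	}
	return c, release
}

func main() {
	a, releaseA := acquire("xds:///service")
	b, releaseB := acquire("xds:///service")
	fmt.Println(a == b) // true: the same name shares one instance
	releaseA()
	releaseB() // last release closes the client
}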
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go index 8fbc010f743d4..f4d7b0a0115c2 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_dump.go @@ -22,27 +22,32 @@ import ( v3statuspb "github.com/envoyproxy/go-control-plane/envoy/service/status/v3" ) -// DumpResources returns the status and contents of all xDS resources. -func (c *clientImpl) DumpResources() (*v3statuspb.ClientStatusResponse, error) { +// dumpResources returns the status and contents of all xDS resources. +func (c *clientImpl) dumpResources() *v3statuspb.ClientConfig { c.authorityMu.Lock() defer c.authorityMu.Unlock() var retCfg []*v3statuspb.ClientConfig_GenericXdsConfig for _, a := range c.authorities { - cfg, err := a.dumpResources() - if err != nil { - return nil, err - } - retCfg = append(retCfg, cfg...) + retCfg = append(retCfg, a.dumpResources()...) + } + + return &v3statuspb.ClientConfig{ + Node: c.config.Node(), + GenericXdsConfigs: retCfg, } +} - return &v3statuspb.ClientStatusResponse{ - Config: []*v3statuspb.ClientConfig{ - { - // TODO: Populate ClientScope. Need to update go-control-plane dependency. - Node: c.config.NodeProto, - GenericXdsConfigs: retCfg, - }, - }, - }, nil +// DumpResources returns the status and contents of all xDS resources. +func DumpResources() *v3statuspb.ClientStatusResponse { + clientsMu.Lock() + defer clientsMu.Unlock() + + resp := &v3statuspb.ClientStatusResponse{} + for key, client := range clients { + cfg := client.dumpResources() + cfg.ClientScope = key + resp.Config = append(resp.Config, cfg) + } + return resp } diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go index ff2f5e9d67285..b42e43a569763 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_loadreport.go @@ -32,7 +32,7 @@ func (c *clientImpl) ReportLoad(server *bootstrap.ServerConfig) (*load.Store, fu a, err := c.newAuthorityLocked(server) if err != nil { c.authorityMu.Unlock() - c.logger.Infof("xds: failed to connect to the control plane to do load reporting for authority %q: %v", server, err) + c.logger.Warningf("Failed to connect to the management server to report load for authority %q: %v", server, err) return nil, func() {} } // Hold the ref before starting load reporting. 
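The clientimpl_dump.go change above turns DumpResources into a package-level function that walks the registry of named clients and tags each per-client config with its scope (the client name). The sketch below shows that aggregation with plain structs instead of the go-control-plane status protos; the registry and snapshot types are assumptions for illustration.

package main

import (
	"fmt"
	"sync"
)

// clientSnapshot is a stand-in for the per-client config produced by
// dumpResources in the diff above.
type clientSnapshot struct {
	ClientScope string
	Resources   []string
}

type registry struct {
	mu      sync.Mutex
	clients map[string][]string // name -> resource names currently cached
}

// DumpAll holds the registry lock, asks every registered client for its view,
// and labels each snapshot with the client's name so consumers can tell the
// scopes apart.
func (r *registry) DumpAll() []clientSnapshot {
	r.mu.Lock()
	defer r.mu.Unlock()
	var out []clientSnapshot
	for name, resources := range r.clients {
		out = append(out, clientSnapshot{ClientScope: name, Resources: resources})
	}
	return out
}

func main() {
	r := &registry{clients: map[string][]string{
		"xds:///foo": {"listener-a", "route-a"},
		"#server":    {"listener-srv"},
	}}
	for _, snap := range r.DumpAll() {
		fmt.Printf("%s: %v\n", snap.ClientScope, snap.Resources)
	}
}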
diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go index 22b8eb0107c93..b9af85db63a85 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -44,7 +44,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, if err := c.resourceTypes.maybeRegister(rType); err != nil { logger.Warningf("Watch registered for name %q of type %q which is already registered", rType.TypeName(), resourceName) - c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) return func() {} } @@ -54,7 +54,7 @@ func (c *clientImpl) WatchResource(rType xdsresource.Type, resourceName string, a, unref, err := c.findAuthority(n) if err != nil { logger.Warningf("Watch registered for name %q of type %q, authority %q is not found", rType.TypeName(), resourceName, n.Authority) - c.serializer.Schedule(func(context.Context) { watcher.OnError(err) }) + c.serializer.TrySchedule(func(context.Context) { watcher.OnError(err, func() {}) }) return func() {} } cancelF := a.watchResource(rType, n.String(), watcher) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go new file mode 100644 index 0000000000000..e126107441093 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/internal/internal.go @@ -0,0 +1,25 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functionality internal to the xdsclient package. +package internal + +// The following vars can be overridden by tests. +var ( + // NewADSStream is a function that returns a new ADS stream. + NewADSStream any // func(context.Context, *grpc.ClientConn) (v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient, error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go index 1f266ae20185b..f1e265ee7ddf5 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/load/store.go @@ -174,6 +174,7 @@ func (ls *perClusterStore) CallStarted(locality string) { p, _ = ls.localityRPCCount.LoadOrStore(locality, tp) } p.(*rpcCountData).incrInProgress() + p.(*rpcCountData).incrIssued() } // CallFinished adds one call finished record for the given locality. @@ -248,6 +249,8 @@ type RequestData struct { Errored uint64 // InProgress is the number of requests in flight. InProgress uint64 + // Issued is the total number requests that were sent. + Issued uint64 } // ServerLoadData contains server load data. 
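The load-store changes above add an "issued" counter that is incremented whenever a call starts and read-and-reset when stats are flushed. Below is a small sketch of that atomic count-and-clear pattern with an illustrative type; it is not the load.Store implementation.

package main

import (
	"fmt"
	"sync/atomic"
)

// callCounters is a stand-in for per-locality RPC counters: issued counts
// every RPC that was started, inProgress tracks RPCs that have not finished.
type callCounters struct {
	issued     atomic.Uint64
	inProgress atomic.Uint64
}

func (c *callCounters) callStarted() {
	c.inProgress.Add(1)
	c.issued.Add(1)
}

func (c *callCounters) callFinished() {
	c.inProgress.Add(^uint64(0)) // atomic decrement
}

// snapshot returns the number of calls issued since the previous snapshot and
// clears that counter (a delta, like loadAndClearIssued above), while
// inProgress is read without clearing because it is a gauge.
func (c *callCounters) snapshot() (issued, inProgress uint64) {
	return c.issued.Swap(0), c.inProgress.Load()
}

func main() {
	var c callCounters
	c.callStarted()
	c.callStarted()
	c.callFinished()
	issued, inFlight := c.snapshot()
	fmt.Println(issued, inFlight) // 2 1
}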
@@ -296,7 +299,8 @@ func (ls *perClusterStore) stats() *Data { succeeded := countData.loadAndClearSucceeded() inProgress := countData.loadInProgress() errored := countData.loadAndClearErrored() - if succeeded == 0 && inProgress == 0 && errored == 0 { + issued := countData.loadAndClearIssued() + if succeeded == 0 && inProgress == 0 && errored == 0 && issued == 0 { return true } @@ -305,6 +309,7 @@ func (ls *perClusterStore) stats() *Data { Succeeded: succeeded, Errored: errored, InProgress: inProgress, + Issued: issued, }, LoadStats: make(map[string]ServerLoadData), } @@ -339,6 +344,7 @@ type rpcCountData struct { succeeded *uint64 errored *uint64 inProgress *uint64 + issued *uint64 // Map from load desc to load data (sum+count). Loading data from map is // atomic, but updating data takes a lock, which could cause contention when @@ -353,6 +359,7 @@ func newRPCCountData() *rpcCountData { succeeded: new(uint64), errored: new(uint64), inProgress: new(uint64), + issued: new(uint64), } } @@ -384,6 +391,14 @@ func (rcd *rpcCountData) loadInProgress() uint64 { return atomic.LoadUint64(rcd.inProgress) // InProgress count is not clear when reading. } +func (rcd *rpcCountData) incrIssued() { + atomic.AddUint64(rcd.issued, 1) +} + +func (rcd *rpcCountData) loadAndClearIssued() uint64 { + return atomic.SwapUint64(rcd.issued, 0) +} + func (rcd *rpcCountData) addServerLoad(name string, d float64) { loads, ok := rcd.serverLoads.Load(name) if !ok { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go deleted file mode 100644 index f981bfebb582c..0000000000000 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/singleton.go +++ /dev/null @@ -1,115 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package xdsclient - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/internal/envconfig" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/xds/bootstrap" -) - -const ( - defaultWatchExpiryTimeout = 15 * time.Second - defaultIdleAuthorityDeleteTimeout = 5 * time.Minute -) - -var ( - // This is the client returned by New(). It contains one client implementation, - // and maintains the refcount. - singletonMu sync.Mutex - singletonClient *clientRefCounted - - // The following functions are no-ops in the actual code, but can be - // overridden in tests to give them visibility into certain events. - singletonClientImplCreateHook = func() {} - singletonClientImplCloseHook = func() {} -) - -// To override in tests. 
-var bootstrapNewConfig = bootstrap.NewConfig - -func clientRefCountedClose() { - singletonMu.Lock() - defer singletonMu.Unlock() - - if singletonClient.decrRef() != 0 { - return - } - singletonClient.clientImpl.close() - singletonClientImplCloseHook() - singletonClient = nil -} - -func newRefCountedWithConfig(fallbackConfig *bootstrap.Config) (XDSClient, func(), error) { - singletonMu.Lock() - defer singletonMu.Unlock() - - if singletonClient != nil { - singletonClient.incrRef() - return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil - - } - - // Use fallbackConfig only if bootstrap env vars are unspecified. - var config *bootstrap.Config - if envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" { - if fallbackConfig == nil { - return nil, nil, fmt.Errorf("xds: bootstrap env vars are unspecified and provided fallback config is nil") - } - config = fallbackConfig - } else { - var err error - config, err = bootstrapNewConfig() - if err != nil { - return nil, nil, fmt.Errorf("xds: failed to read bootstrap file: %v", err) - } - } - - // Create the new client implementation. - c, err := newWithConfig(config, defaultWatchExpiryTimeout, defaultIdleAuthorityDeleteTimeout) - if err != nil { - return nil, nil, err - } - singletonClient = &clientRefCounted{clientImpl: c, refCount: 1} - singletonClientImplCreateHook() - - logger.Infof("xDS node ID: %s", config.NodeProto.GetId()) - return singletonClient, grpcsync.OnceFunc(clientRefCountedClose), nil -} - -// clientRefCounted is ref-counted, and to be shared by the xds resolver and -// balancer implementations, across multiple ClientConns and Servers. -type clientRefCounted struct { - *clientImpl - - refCount int32 // accessed atomically -} - -func (c *clientRefCounted) incrRef() int32 { - return atomic.AddInt32(&c.refCount, 1) -} - -func (c *clientRefCounted) decrRef() int32 { - return atomic.AddInt32(&c.refCount, -1) -} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go new file mode 100644 index 0000000000000..9acc33cbbf8de --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/internal/internal.go @@ -0,0 +1,25 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package internal contains functionality internal to the transport package. +package internal + +// The following vars can be overridden by tests. +var ( + // GRPCNewClient creates a new gRPC Client. 
+ GRPCNewClient any // func(string, ...grpc.DialOption) (*grpc.ClientConn, error) +) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go index 289fd62cbc75d..e47fdd9846ba6 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -223,6 +223,7 @@ func (t *Transport) sendLoadStatsRequest(stream lrsStream, loads []*load.Data) e TotalSuccessfulRequests: localityData.RequestStats.Succeeded, TotalRequestsInProgress: localityData.RequestStats.InProgress, TotalErrorRequests: localityData.RequestStats.Errored, + TotalIssuedRequests: localityData.RequestStats.Issued, LoadMetricStats: loadMetricStats, UpstreamEndpointStats: nil, // TODO: populate for per endpoint loads. }) diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go index 421ba78074c0a..0bc0d386802d3 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "sync" + "sync/atomic" "time" "google.golang.org/grpc" @@ -35,7 +36,9 @@ import ( "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/xds/bootstrap" "google.golang.org/grpc/keepalive" + xdsclientinternal "google.golang.org/grpc/xds/internal/xdsclient/internal" "google.golang.org/grpc/xds/internal/xdsclient/load" + transportinternal "google.golang.org/grpc/xds/internal/xdsclient/transport/internal" "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" "google.golang.org/protobuf/types/known/anypb" @@ -45,17 +48,23 @@ import ( statuspb "google.golang.org/genproto/googleapis/rpc/status" ) +type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient + +func init() { + transportinternal.GRPCNewClient = grpc.NewClient + xdsclientinternal.NewADSStream = func(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { + return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) + } +} + // Any per-RPC level logs which print complete request or response messages // should be gated at this verbosity level. Other per-RPC level logs which print -// terse output should be at `INFO` and verbosity 2, which corresponds to using -// the `Debugf` method on the logger. +// terse output should be at `INFO` and verbosity 2. const perRPCVerbosityLevel = 9 -type adsStream = v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient - // Transport provides a resource-type agnostic implementation of the xDS // transport protocol. At this layer, resource contents are supposed to be -// opaque blobs which should be be meaningful only to the xDS data model layer +// opaque blobs which should be meaningful only to the xDS data model layer // which is implemented by the `xdsresource` package. // // Under the hood, it owns the gRPC connection to a single management server and @@ -77,7 +86,7 @@ type Transport struct { lrsRunnerDoneCh chan struct{} // To notify exit of LRS goroutine. 
// These channels enable synchronization amongst the different goroutines - // spawned by the transport, and between asynchorous events resulting from + // spawned by the transport, and between asynchronous events resulting from // receipt of responses from the management server. adsStreamCh chan adsStream // New ADS streams are pushed here. adsRequestCh *buffer.Unbounded // Resource and ack requests are pushed here. @@ -112,7 +121,10 @@ type Transport struct { // cause the transport layer to send an ACK to the management server. A non-nil // error is returned from this function when the data model layer believes // otherwise, and this will cause the transport layer to send a NACK. -type OnRecvHandlerFunc func(update ResourceUpdate) error +// +// The implementation is expected to invoke onDone when local processing of the +// update is complete, i.e. it is consumed by all watchers. +type OnRecvHandlerFunc func(update ResourceUpdate, onDone func()) error // OnSendHandlerFunc is the implementation at the authority, which handles state // changes for the resource watch and stop watch timers accordingly. @@ -135,7 +147,7 @@ type ResourceUpdate struct { type Options struct { // ServerCfg contains all the configuration required to connect to the xDS // management server. - ServerCfg bootstrap.ServerConfig + ServerCfg *bootstrap.ServerConfig // OnRecvHandler is the component which makes ACK/NACK decisions based on // the received resources. // @@ -169,16 +181,9 @@ type Options struct { NodeProto *v3corepb.Node } -// For overriding in unit tests. -var grpcDial = grpc.Dial - // New creates a new Transport. func New(opts Options) (*Transport, error) { switch { - case opts.ServerCfg.ServerURI == "": - return nil, errors.New("missing server URI when creating a new transport") - case opts.ServerCfg.CredsDialOption() == nil: - return nil, errors.New("missing credentials when creating a new transport") case opts.OnRecvHandler == nil: return nil, errors.New("missing OnRecv callback handler when creating a new transport") case opts.OnErrorHandler == nil: @@ -197,11 +202,13 @@ func New(opts Options) (*Transport, error) { Timeout: 20 * time.Second, }), } - cc, err := grpcDial(opts.ServerCfg.ServerURI, dopts...) + grpcNewClient := transportinternal.GRPCNewClient.(func(string, ...grpc.DialOption) (*grpc.ClientConn, error)) + cc, err := grpcNewClient(opts.ServerCfg.ServerURI(), dopts...) if err != nil { // An error from a non-blocking dial indicates something serious. - return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI, err) + return nil, fmt.Errorf("failed to create a transport to the management server %q: %v", opts.ServerCfg.ServerURI(), err) } + cc.Connect() boff := opts.Backoff if boff == nil { @@ -209,7 +216,7 @@ func New(opts Options) (*Transport, error) { } ret := &Transport{ cc: cc, - serverURI: opts.ServerCfg.ServerURI, + serverURI: opts.ServerCfg.ServerURI(), onRecvHandler: opts.OnRecvHandler, onErrorHandler: opts.OnErrorHandler, onSendHandler: opts.OnSendHandler, @@ -263,12 +270,6 @@ func (t *Transport) SendRequest(url string, resources []string) { }) } -func (t *Transport) newAggregatedDiscoveryServiceStream(ctx context.Context, cc *grpc.ClientConn) (adsStream, error) { - // The transport retries the stream with an exponential backoff whenever the - // stream breaks without ever having seen a response. 
- return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx) -} - // ResourceSendInfo wraps the names and url of resources sent to the management // server. This is used by the `authority` type to start/stop the watch timer // associated with every resource in the update. @@ -298,7 +299,9 @@ func (t *Transport) sendAggregatedDiscoveryServiceRequest(stream adsStream, send if t.logger.V(perRPCVerbosityLevel) { t.logger.Infof("ADS request sent: %v", pretty.ToJSON(req)) } else { - t.logger.Debugf("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + if t.logger.V(2) { + t.logger.Infof("ADS request sent for type %q, resources: %v, version %q, nonce %q", resourceURL, resourceNames, version, nonce) + } } t.onSendHandler(&ResourceSendInfo{URL: resourceURL, ResourceNames: resourceNames}) return nil @@ -311,8 +314,8 @@ func (t *Transport) recvAggregatedDiscoveryServiceResponse(stream adsStream) (re } if t.logger.V(perRPCVerbosityLevel) { t.logger.Infof("ADS response received: %v", pretty.ToJSON(resp)) - } else { - t.logger.Debugf("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) + } else if t.logger.V(2) { + t.logger.Infof("ADS response received for type %q, version %q, nonce %q", resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce()) } return resp.GetResources(), resp.GetTypeUrl(), resp.GetVersionInfo(), resp.GetNonce(), nil } @@ -328,7 +331,8 @@ func (t *Transport) adsRunner(ctx context.Context) { // We reset backoff state when we successfully receive at least one // message from the server. runStreamWithBackoff := func() error { - stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) + newStream := xdsclientinternal.NewADSStream.(func(context.Context, *grpc.ClientConn) (adsStream, error)) + stream, err := newStream(ctx, t.cc) if err != nil { t.onErrorHandler(err) t.logger.Warningf("Creating new ADS stream failed: %v", err) @@ -341,7 +345,7 @@ func (t *Transport) adsRunner(ctx context.Context) { default: } t.adsStreamCh <- stream - msgReceived := t.recv(stream) + msgReceived := t.recv(ctx, stream) if msgReceived { return backoff.ErrResetBackoff } @@ -461,9 +465,21 @@ func (t *Transport) sendExisting(stream adsStream) (sentNodeProto bool, err erro // recv receives xDS responses on the provided ADS stream and branches out to // message specific handlers. Returns true if at least one message was // successfully received. -func (t *Transport) recv(stream adsStream) bool { +func (t *Transport) recv(ctx context.Context, stream adsStream) bool { + // Initialize the flow control quota for the stream. This helps to block the + // next read until the previous one is consumed by all watchers. + fc := newADSFlowControl() + msgReceived := false for { + // Wait for ADS stream level flow control to be available. 
+ if !fc.wait(ctx) { + if t.logger.V(2) { + t.logger.Infof("ADS stream context canceled") + } + return msgReceived + } + resources, url, rVersion, nonce, err := t.recvAggregatedDiscoveryServiceResponse(stream) if err != nil { // Note that we do not consider it an error if the ADS stream was closed @@ -481,12 +497,13 @@ func (t *Transport) recv(stream adsStream) bool { } msgReceived = true - err = t.onRecvHandler(ResourceUpdate{ + u := ResourceUpdate{ Resources: resources, URL: url, Version: rVersion, - }) - if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { + } + fc.setPending() + if err = t.onRecvHandler(u, fc.onDone); xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceTypeUnsupported { t.logger.Warningf("%v", err) continue } @@ -512,7 +529,9 @@ func (t *Transport) recv(stream adsStream) bool { stream: stream, version: rVersion, }) - t.logger.Debugf("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + if t.logger.V(2) { + t.logger.Infof("Sending ACK for resource type: %q, version: %q, nonce: %q", url, rVersion, nonce) + } } } @@ -618,3 +637,68 @@ func (t *Transport) Close() { func (t *Transport) ChannelConnectivityStateForTesting() connectivity.State { return t.cc.GetState() } + +// adsFlowControl implements ADS stream level flow control that enables the +// transport to block the reading of the next message off of the stream until +// the previous update is consumed by all watchers. +// +// The lifetime of the flow control is tied to the lifetime of the stream. +type adsFlowControl struct { + logger *grpclog.PrefixLogger + + // Whether the most recent update is pending consumption by all watchers. + pending atomic.Bool + // Channel used to notify when all the watchers have consumed the most + // recent update. Wait() blocks on reading a value from this channel. + readyCh chan struct{} +} + +// newADSFlowControl returns a new adsFlowControl. +func newADSFlowControl() *adsFlowControl { + return &adsFlowControl{readyCh: make(chan struct{}, 1)} +} + +// setPending changes the internal state to indicate that there is an update +// pending consumption by all watchers. +func (fc *adsFlowControl) setPending() { + fc.pending.Store(true) +} + +// wait blocks until all the watchers have consumed the most recent update and +// returns true. If the context expires before that, it returns false. +func (fc *adsFlowControl) wait(ctx context.Context) bool { + // If there is no pending update, there is no need to block. + if !fc.pending.Load() { + // If all watchers finished processing the most recent update before the + // `recv` goroutine made the next call to `Wait()`, there would be an + // entry in the readyCh channel that needs to be drained to ensure that + // the next call to `Wait()` doesn't unblock before it actually should. + select { + case <-fc.readyCh: + default: + } + return true + } + + select { + case <-ctx.Done(): + return false + case <-fc.readyCh: + return true + } +} + +// onDone indicates that all watchers have consumed the most recent update. +func (fc *adsFlowControl) onDone() { + fc.pending.Store(false) + + select { + // Writes to the readyCh channel should not block ideally. The default + // branch here is to appease the paranoid mind. 
+ case fc.readyCh <- struct{}{}: + default: + if fc.logger.V(2) { + fc.logger.Infof("ADS stream flow control readyCh is full") + } + } +} diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go index 5ac7f03122390..18d47cbc101d7 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/cluster_resource_type.go @@ -111,7 +111,7 @@ func (c *ClusterResourceData) Raw() *anypb.Any { // corresponding to the cluster resource being watched. type ClusterWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - OnUpdate(*ClusterResourceData) + OnUpdate(*ClusterResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -121,28 +121,28 @@ type ClusterWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingClusterWatcher struct { watcher ClusterWatcher } -func (d *delegatingClusterWatcher) OnUpdate(data ResourceData) { +func (d *delegatingClusterWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { c := data.(*ClusterResourceData) - d.watcher.OnUpdate(c) + d.watcher.OnUpdate(c, onDone) } -func (d *delegatingClusterWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingClusterWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingClusterWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingClusterWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchCluster uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go index 775a8aa194231..66c0ae0b2022a 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/endpoints_resource_type.go @@ -54,7 +54,7 @@ type endpointsResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. -func (endpointsResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { +func (endpointsResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { name, rc, err := unmarshalEndpointsResource(resource) switch { case name == "": @@ -107,7 +107,7 @@ func (e *EndpointsResourceData) Raw() *anypb.Any { // events corresponding to the endpoints resource being watched. type EndpointsWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. 
- OnUpdate(*EndpointsResourceData) + OnUpdate(*EndpointsResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -117,28 +117,28 @@ type EndpointsWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingEndpointsWatcher struct { watcher EndpointsWatcher } -func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData) { +func (d *delegatingEndpointsWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { e := data.(*EndpointsResourceData) - d.watcher.OnUpdate(e) + d.watcher.OnUpdate(e, onDone) } -func (d *delegatingEndpointsWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingEndpointsWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingEndpointsWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchEndpoints uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go index bef1277d220c7..196bb9f873f24 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/filter_chain.go @@ -536,12 +536,12 @@ func (fcm *FilterChainManager) filterChainFromProto(fc *v3listenerpb.FilterChain if name := ts.GetName(); name != transportSocketName { return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3DownstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3DownstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) } downstreamCtx := &v3tlspb.DownstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), downstreamCtx); err != nil { + if err := proto.Unmarshal(tc.GetValue(), downstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal DownstreamTlsContext in LDS response: %v", err) } if downstreamCtx.GetRequireSni().GetValue() { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go index 4337e4e063f75..80fa5e6a21ec9 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/listener_resource_type.go @@ -60,12 +60,12 @@ func securityConfigValidator(bc *bootstrap.Config, sc *SecurityConfig) error { return nil } if sc.IdentityInstanceName != "" { - if _, ok := bc.CertProviderConfigs[sc.IdentityInstanceName]; !ok { + if _, ok := bc.CertProviderConfigs()[sc.IdentityInstanceName]; !ok { return fmt.Errorf("identity certificate provider instance name %q missing in bootstrap 
configuration", sc.IdentityInstanceName) } } if sc.RootInstanceName != "" { - if _, ok := bc.CertProviderConfigs[sc.RootInstanceName]; !ok { + if _, ok := bc.CertProviderConfigs()[sc.RootInstanceName]; !ok { return fmt.Errorf("root certificate provider instance name %q missing in bootstrap configuration", sc.RootInstanceName) } } @@ -144,7 +144,7 @@ func (l *ListenerResourceData) Raw() *anypb.Any { // events corresponding to the listener resource being watched. type ListenerWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - OnUpdate(*ListenerResourceData) + OnUpdate(*ListenerResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -154,28 +154,28 @@ type ListenerWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingListenerWatcher struct { watcher ListenerWatcher } -func (d *delegatingListenerWatcher) OnUpdate(data ResourceData) { +func (d *delegatingListenerWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { l := data.(*ListenerResourceData) - d.watcher.OnUpdate(l) + d.watcher.OnUpdate(l, onDone) } -func (d *delegatingListenerWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingListenerWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingListenerWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingListenerWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchListener uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go index 3b3a8e79c2b92..55cfd6fbb15b2 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/resource_type.go @@ -52,13 +52,19 @@ type Producer interface { WatchResource(rType Type, resourceName string, watcher ResourceWatcher) (cancel func()) } +// OnDoneFunc is a function to be invoked by watcher implementations upon +// completing the processing of a callback from the xDS client. Failure to +// invoke this callback prevents the xDS client from reading further messages +// from the xDS server. +type OnDoneFunc func() + // ResourceWatcher wraps the callbacks to be invoked for different events // corresponding to the resource being watched. type ResourceWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. // The ResourceData parameter needs to be type asserted to the appropriate // type for the resource being watched. 
- OnUpdate(ResourceData) + OnUpdate(ResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -68,11 +74,11 @@ type ResourceWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. - OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } // TODO: Once the implementation is complete, rename this interface as diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go index 8ce5cb28596e7..ed32abb8333d3 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/route_config_resource_type.go @@ -54,7 +54,7 @@ type routeConfigResourceType struct { // Decode deserializes and validates an xDS resource serialized inside the // provided `Any` proto, as received from the xDS management server. -func (routeConfigResourceType) Decode(opts *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { +func (routeConfigResourceType) Decode(_ *DecodeOptions, resource *anypb.Any) (*DecodeResult, error) { name, rc, err := unmarshalRouteConfigResource(resource) switch { case name == "": @@ -108,7 +108,7 @@ func (r *RouteConfigResourceData) Raw() *anypb.Any { // events corresponding to the route configuration resource being watched. type RouteConfigWatcher interface { // OnUpdate is invoked to report an update for the resource being watched. - OnUpdate(*RouteConfigResourceData) + OnUpdate(*RouteConfigResourceData, OnDoneFunc) // OnError is invoked under different error conditions including but not // limited to the following: @@ -118,28 +118,28 @@ type RouteConfigWatcher interface { // - resource validation error // - ADS stream failure // - connection failure - OnError(error) + OnError(error, OnDoneFunc) // OnResourceDoesNotExist is invoked for a specific error condition where // the requested resource is not found on the xDS management server. 
- OnResourceDoesNotExist() + OnResourceDoesNotExist(OnDoneFunc) } type delegatingRouteConfigWatcher struct { watcher RouteConfigWatcher } -func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData) { +func (d *delegatingRouteConfigWatcher) OnUpdate(data ResourceData, onDone OnDoneFunc) { rc := data.(*RouteConfigResourceData) - d.watcher.OnUpdate(rc) + d.watcher.OnUpdate(rc, onDone) } -func (d *delegatingRouteConfigWatcher) OnError(err error) { - d.watcher.OnError(err) +func (d *delegatingRouteConfigWatcher) OnError(err error, onDone OnDoneFunc) { + d.watcher.OnError(err, onDone) } -func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist() { - d.watcher.OnResourceDoesNotExist() +func (d *delegatingRouteConfigWatcher) OnResourceDoesNotExist(onDone OnDoneFunc) { + d.watcher.OnResourceDoesNotExist(onDone) } // WatchRouteConfig uses xDS to discover the configuration associated with the diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go index 8ede639abee60..ab024b57c4600 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/xdsresource/unmarshal_cds.go @@ -278,7 +278,7 @@ func dnsHostNameFromCluster(cluster *v3clusterpb.Cluster) (string, error) { // the received Cluster resource. func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, error) { if tsm := cluster.GetTransportSocketMatches(); len(tsm) != 0 { - return nil, fmt.Errorf("unsupport transport_socket_matches field is non-empty: %+v", tsm) + return nil, fmt.Errorf("unsupported transport_socket_matches field is non-empty: %+v", tsm) } // The Cluster resource contains a `transport_socket` field, which contains // a oneof `typed_config` field of type `protobuf.Any`. 
The any proto @@ -290,12 +290,12 @@ func securityConfigFromCluster(cluster *v3clusterpb.Cluster) (*SecurityConfig, e if name := ts.GetName(); name != transportSocketName { return nil, fmt.Errorf("transport_socket field has unexpected name: %s", name) } - any := ts.GetTypedConfig() - if any == nil || any.TypeUrl != version.V3UpstreamTLSContextURL { - return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", any.TypeUrl) + tc := ts.GetTypedConfig() + if tc == nil || tc.TypeUrl != version.V3UpstreamTLSContextURL { + return nil, fmt.Errorf("transport_socket field has unexpected typeURL: %s", tc.TypeUrl) } upstreamCtx := &v3tlspb.UpstreamTlsContext{} - if err := proto.Unmarshal(any.GetValue(), upstreamCtx); err != nil { + if err := proto.Unmarshal(tc.GetValue(), upstreamCtx); err != nil { return nil, fmt.Errorf("failed to unmarshal UpstreamTlsContext in CDS response: %v", err) } // The following fields from `UpstreamTlsContext` are ignored: @@ -477,7 +477,7 @@ func securityConfigFromCommonTLSContextUsingNewFields(common *v3tlspb.CommonTlsC case len(validationCtx.GetVerifyCertificateHash()) != 0: return nil, fmt.Errorf("unsupported verify_certificate_hash field in CommonTlsContext message: %+v", common) case validationCtx.GetRequireSignedCertificateTimestamp().GetValue(): - return nil, fmt.Errorf("unsupported require_sugned_ceritificate_timestamp field in CommonTlsContext message: %+v", common) + return nil, fmt.Errorf("unsupported require_signed_certificate_timestamp field in CommonTlsContext message: %+v", common) case validationCtx.GetCrl() != nil: return nil, fmt.Errorf("unsupported crl field in CommonTlsContext message: %+v", common) case validationCtx.GetCustomValidatorConfig() != nil: diff --git a/vendor/google.golang.org/grpc/xds/server.go b/vendor/google.golang.org/grpc/xds/server.go index 126aff067c4ca..1fea8c8309367 100644 --- a/vendor/google.golang.org/grpc/xds/server.go +++ b/vendor/google.golang.org/grpc/xds/server.go @@ -43,8 +43,8 @@ const serverPrefix = "[xds-server %p] " var ( // These new functions will be overridden in unit tests. - newXDSClient = func() (xdsclient.XDSClient, func(), error) { - return xdsclient.New() + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.New(name) } newGRPCServer = func(opts ...grpc.ServerOption) grpcServer { return grpc.NewServer(opts...) @@ -95,11 +95,14 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { newXDSClient := newXDSClient if s.opts.bootstrapContentsForTesting != nil { // Bootstrap file contents may be specified as a server option for tests. - newXDSClient = func() (xdsclient.XDSClient, func(), error) { - return xdsclient.NewForTesting(xdsclient.OptionsForTesting{Contents: s.opts.bootstrapContentsForTesting}) + newXDSClient = func(name string) (xdsclient.XDSClient, func(), error) { + return xdsclient.NewForTesting(xdsclient.OptionsForTesting{ + Name: name, + Contents: s.opts.bootstrapContentsForTesting, + }) } } - xdsClient, xdsClientClose, err := newXDSClient() + xdsClient, xdsClientClose, err := newXDSClient(xdsclient.NameForServer) if err != nil { return nil, fmt.Errorf("xDS client creation failed: %v", err) } @@ -108,7 +111,7 @@ func NewGRPCServer(opts ...grpc.ServerOption) (*GRPCServer, error) { // Listener resource name template is mandatory on the server side. 
cfg := xdsClient.BootstrapConfig() - if cfg.ServerListenerResourceNameTemplate == "" { + if cfg.ServerListenerResourceNameTemplate() == "" { xdsClientClose() return nil, errors.New("missing server_listener_resource_name_template in the bootstrap configuration") } @@ -191,7 +194,7 @@ func (s *GRPCServer) Serve(lis net.Listener) error { // string, it will be replaced with the server's listening "IP:port" (e.g., // "0.0.0.0:8080", "[::]:8080"). cfg := s.xdsC.BootstrapConfig() - name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate, lis.Addr().String()) + name := bootstrap.PopulateResourceTemplate(cfg.ServerListenerResourceNameTemplate(), lis.Addr().String()) // Create a listenerWrapper which handles all functionality required by // this particular instance of Serve(). diff --git a/vendor/modules.txt b/vendor/modules.txt index b49a55d88caed..260c1292782dd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -10,7 +10,7 @@ cloud.google.com/go/internal/optional cloud.google.com/go/internal/pubsub cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.9.0 +# cloud.google.com/go/auth v0.9.4 ## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -29,18 +29,18 @@ cloud.google.com/go/auth/internal/transport/cert # cloud.google.com/go/auth/oauth2adapt v0.2.4 ## explicit; go 1.20 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/bigtable v1.29.0 -## explicit; go 1.20 +# cloud.google.com/go/bigtable v1.31.0 +## explicit; go 1.21 cloud.google.com/go/bigtable cloud.google.com/go/bigtable/admin/apiv2/adminpb cloud.google.com/go/bigtable/apiv2/bigtablepb cloud.google.com/go/bigtable/bttest cloud.google.com/go/bigtable/internal cloud.google.com/go/bigtable/internal/option -# cloud.google.com/go/compute/metadata v0.5.0 -## explicit; go 1.20 +# cloud.google.com/go/compute/metadata v0.5.1 +## explicit; go 1.21 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.2.0 +# cloud.google.com/go/iam v1.2.1 ## explicit; go 1.21 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb @@ -54,7 +54,7 @@ cloud.google.com/go/longrunning/autogen/longrunningpb cloud.google.com/go/monitoring/apiv3/v2 cloud.google.com/go/monitoring/apiv3/v2/monitoringpb cloud.google.com/go/monitoring/internal -# cloud.google.com/go/pubsub v1.42.0 +# cloud.google.com/go/pubsub v1.43.0 ## explicit; go 1.21 cloud.google.com/go/pubsub cloud.google.com/go/pubsub/apiv1 @@ -515,7 +515,7 @@ github.com/cespare/xxhash # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 +# github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 ## explicit; go 1.19 github.com/cncf/xds/go/udpa/annotations github.com/cncf/xds/go/udpa/type/v1 @@ -742,10 +742,12 @@ github.com/fluent/fluent-bit-go/output # github.com/fsnotify/fsnotify v1.7.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify -# github.com/fsouza/fake-gcs-server v1.7.0 -## explicit +# github.com/fsouza/fake-gcs-server v1.50.0 +## explicit; go 1.22 github.com/fsouza/fake-gcs-server/fakestorage github.com/fsouza/fake-gcs-server/internal/backend +github.com/fsouza/fake-gcs-server/internal/checksum +github.com/fsouza/fake-gcs-server/internal/notification # github.com/gabriel-vasile/mimetype v1.4.3 ## explicit; go 1.20 github.com/gabriel-vasile/mimetype @@ -889,7 +891,7 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.4 ## explicit 
github.com/golang/snappy -# github.com/google/btree v1.1.2 +# github.com/google/btree v1.1.3 ## explicit; go 1.18 github.com/google/btree # github.com/google/gnostic-models v0.6.8 @@ -945,7 +947,7 @@ github.com/google/s2a-go/stream # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.3.2 +# github.com/googleapis/enterprise-certificate-proxy v0.3.4 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util @@ -971,6 +973,9 @@ github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1 github.com/gophercloud/gophercloud/openstack/identity/v3/tokens github.com/gophercloud/gophercloud/openstack/utils github.com/gophercloud/gophercloud/pagination +# github.com/gorilla/handlers v1.5.2 +## explicit; go 1.20 +github.com/gorilla/handlers # github.com/gorilla/mux v1.8.1 ## explicit; go 1.20 github.com/gorilla/mux @@ -1377,6 +1382,9 @@ github.com/pkg/browser # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors +# github.com/pkg/xattr v0.4.10 +## explicit; go 1.14 +github.com/pkg/xattr # github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 ## explicit; go 1.20 github.com/planetscale/vtprotobuf/protohelpers @@ -1746,16 +1754,17 @@ go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp # go.opentelemetry.io/collector/semconv v0.105.0 ## explicit; go 1.21.0 go.opentelemetry.io/collector/semconv/v1.6.1 -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.28.0 +# go.opentelemetry.io/otel v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1770,20 +1779,19 @@ go.opentelemetry.io/otel/semconv/v1.17.0 go.opentelemetry.io/otel/semconv/v1.18.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 -go.opentelemetry.io/otel/semconv/v1.24.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/metric v1.28.0 +# go.opentelemetry.io/otel/metric v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.28.0 +# go.opentelemetry.io/otel/sdk v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk go.opentelemetry.io/otel/sdk/instrumentation go.opentelemetry.io/otel/sdk/internal/x go.opentelemetry.io/otel/sdk/resource -# go.opentelemetry.io/otel/sdk/metric v1.28.0 +# go.opentelemetry.io/otel/sdk/metric v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/sdk/metric go.opentelemetry.io/otel/sdk/metric/internal @@ -1791,7 +1799,7 @@ go.opentelemetry.io/otel/sdk/metric/internal/aggregate 
go.opentelemetry.io/otel/sdk/metric/internal/exemplar go.opentelemetry.io/otel/sdk/metric/internal/x go.opentelemetry.io/otel/sdk/metric/metricdata -# go.opentelemetry.io/otel/trace v1.28.0 +# go.opentelemetry.io/otel/trace v1.29.0 ## explicit; go 1.21 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1817,7 +1825,7 @@ go.uber.org/zap/zapgrpc # go4.org/netipx v0.0.0-20230125063823-8449b0a6169f ## explicit; go 1.18 go4.org/netipx -# golang.org/x/crypto v0.26.0 +# golang.org/x/crypto v0.27.0 ## explicit; go 1.20 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt @@ -1844,7 +1852,7 @@ golang.org/x/exp/slices # golang.org/x/mod v0.19.0 ## explicit; go 1.18 golang.org/x/mod/semver -# golang.org/x/net v0.28.0 +# golang.org/x/net v0.29.0 ## explicit; go 1.18 golang.org/x/net/bpf golang.org/x/net/context @@ -1866,7 +1874,7 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1884,7 +1892,7 @@ golang.org/x/oauth2/jwt golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.24.0 +# golang.org/x/sys v0.25.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 @@ -1892,10 +1900,10 @@ golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc/eventlog -# golang.org/x/term v0.23.0 +# golang.org/x/term v0.24.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.17.0 +# golang.org/x/text v0.18.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding @@ -1940,7 +1948,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/api v0.193.0 +# google.golang.org/api v0.198.0 ## explicit; go 1.21 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1 @@ -1961,13 +1969,13 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/genproto v0.0.0-20240820151423-278611b39280 +# google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/googleapis/type/calendarperiod google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/api -# google.golang.org/genproto/googleapis/api v0.0.0-20240820151423-278611b39280 +# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -1976,12 +1984,12 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240820151423-278611b39280 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.65.0 +# google.golang.org/grpc v1.67.0 ## explicit; go 1.21 google.golang.org/grpc 
google.golang.org/grpc/attributes @@ -2024,7 +2032,9 @@ google.golang.org/grpc/encoding google.golang.org/grpc/encoding/gzip google.golang.org/grpc/encoding/proto google.golang.org/grpc/experimental +google.golang.org/grpc/experimental/stats google.golang.org/grpc/grpclog +google.golang.org/grpc/grpclog/internal google.golang.org/grpc/health google.golang.org/grpc/health/grpc_health_v1 google.golang.org/grpc/internal @@ -2068,6 +2078,7 @@ google.golang.org/grpc/internal/xds/bootstrap/tlscreds google.golang.org/grpc/internal/xds/matcher google.golang.org/grpc/internal/xds/rbac google.golang.org/grpc/keepalive +google.golang.org/grpc/mem google.golang.org/grpc/metadata google.golang.org/grpc/orca google.golang.org/grpc/orca/internal @@ -2105,8 +2116,10 @@ google.golang.org/grpc/xds/internal/resolver google.golang.org/grpc/xds/internal/resolver/internal google.golang.org/grpc/xds/internal/server google.golang.org/grpc/xds/internal/xdsclient +google.golang.org/grpc/xds/internal/xdsclient/internal google.golang.org/grpc/xds/internal/xdsclient/load google.golang.org/grpc/xds/internal/xdsclient/transport +google.golang.org/grpc/xds/internal/xdsclient/transport/internal google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry google.golang.org/grpc/xds/internal/xdsclient/xdslbregistry/converter google.golang.org/grpc/xds/internal/xdsclient/xdsresource