From a081ed7a1ac0330df22dc9a39018cf44f213f58d Mon Sep 17 00:00:00 2001
From: Yusuke Kato
Date: Thu, 12 Sep 2024 16:35:31 +0900
Subject: [PATCH] Add UpdateTimestamp API (#2605)

Signed-off-by: kpango
---
 .cspell.json | 2698 ++++++++++++++++-
 .gitfiles | 127 +-
 .github/ISSUE_TEMPLATE/bug_report.md | 8 +
 .../ISSUE_TEMPLATE/security_issue_report.md | 8 +
 .github/PULL_REQUEST_TEMPLATE.md | 8 +
 .github/{dependabot.yml => dependabot.yaml} | 0
 .github/helm/values/values-correction.yaml | 2 +-
 .github/{labeler.yml => labeler.yaml} | 0
 ...ontainer.yml => _detect-ci-container.yaml} | 0
 .../{_release-pr.yml => _release-pr.yaml} | 2 +-
 .../workflows/{backport.yml => backport.yaml} | 0
 ...build-binaries.yml => build-binaries.yaml} | 2 +-
 ...build-protobuf.yml => build-protobuf.yaml} | 2 +-
 .../{chatops-help.yml => chatops-help.yaml} | 0
 .../workflows/{chatops.yml => chatops.yaml} | 0
 ...check-conflict.yml => check-conflict.yaml} | 2 +-
 ...deql-analysis.yml => codeql-analysis.yaml} | 4 +-
 .../workflows/{coverage.yml => coverage.yaml} | 6 +-
 ...ml => detect-internal-config-changes.yaml} | 0
 .github/workflows/dockers-agent-faiss-image.yaml | 104 +-
 .github/workflows/dockers-agent-image.yaml | 108 +-
 .../workflows/dockers-agent-ngt-image.yaml | 100 +-
 .../dockers-agent-sidecar-image.yaml | 100 +-
 .../dockers-benchmark-job-image.yaml | 100 +-
 .../dockers-benchmark-operator-image.yaml | 100 +-
 .github/workflows/dockers-binfmt-image.yaml | 90 +-
 .../workflows/dockers-buildbase-image.yaml | 90 +-
 .github/workflows/dockers-buildkit-image.yaml | 90 +-
 .../dockers-buildkit-syft-scanner-image.yaml | 90 +-
 .../workflows/dockers-ci-container-image.yaml | 68 +-
 .../dockers-dev-container-image.yaml | 68 +-
 .../dockers-discoverer-k8s-image.yaml | 92 +-
 .../dockers-gateway-filter-image.yaml | 100 +-
 .../workflows/dockers-gateway-lb-image.yaml | 100 +-
 .../dockers-gateway-mirror-image.yaml | 96 +-
 .../dockers-helm-operator-image.yaml | 84 +-
 ...image-scan.yml => dockers-image-scan.yaml} | 0
 .../dockers-index-correction-image.yaml | 96 +-
 .../dockers-index-creation-image.yaml | 96 +-
 .../dockers-index-operator-image.yaml | 92 +-
 .../workflows/dockers-index-save-image.yaml | 96 +-
 .github/workflows/dockers-loadtest-image.yaml | 96 +-
 .../dockers-manager-index-image.yaml | 96 +-
 .../dockers-readreplica-rotate-image.yaml | 92 +-
 .github/workflows/e2e-chaos.yaml | 2 +-
 .github/workflows/e2e-code-bench-agent.yaml | 6 +-
 .../{e2e-max-dim.yml => e2e-max-dim.yaml} | 2 +-
 .../{e2e-profiling.yml => e2e-profiling.yaml} | 2 +-
 .github/workflows/{e2e.yml => e2e.yaml} | 2 +-
 .github/workflows/{format.yml => format.yaml} | 2 +-
 .github/workflows/{fossa.yml => fossa.yaml} | 2 +-
 .../{helm-lint.yml => helm-lint.yaml} | 2 +-
 .github/workflows/{helm.yml => helm.yaml} | 2 +-
 .../workflows/{labeler.yml => labeler.yaml} | 1 +
 .../workflows/{release.yml => release.yaml} | 0
 ...g-hadolint.yml => reviewdog-hadolint.yaml} | 0
 .../{reviewdog-k8s.yml => reviewdog-k8s.yaml} | 2 +-
 ...g-markdown.yml => reviewdog-markdown.yaml} | 0
 .../{reviewdog.yml => reviewdog.yaml} | 4 +-
 .github/workflows/semver-major-minor.yaml | 2 +-
 .github/workflows/semver-patch.yaml | 2 +-
 .../{test-hack.yml => test-hack.yaml} | 6 +-
 .github/workflows/unit-test.yaml | 6 +-
 .github/workflows/update-actions.yaml | 2 +-
 ...date-pull-request-and-issue-template.yaml} | 0
 ...date-web-docs.yml => update-web-docs.yaml} | 0
 Makefile | 13 +-
 Makefile.d/client.mk | 45 -
 Makefile.d/dependencies.mk | 6 -
 Makefile.d/docker.mk | 27 +
 Makefile.d/e2e.mk | 43 +-
 Makefile.d/functions.mk | 2 +-
 Makefile.d/tools.mk | 29 +-
 apis/docs/v1/docs.md | 24 +-
 .../v1/agent/sidecar/sidecar_vtproto.pb.go | 2 +-
 apis/grpc/v1/payload/payload.pb.go | 1330 ++++++--
 apis/grpc/v1/payload/payload.pb.json.go | 10 +
 apis/grpc/v1/payload/payload_vtproto.pb.go | 243 ++
 apis/grpc/v1/vald/update.pb.go | 46 +-
 apis/grpc/v1/vald/update_vtproto.pb.go | 45 +
 apis/grpc/v1/vald/vald.go | 1 +
 apis/proto/v1/payload/payload.proto | 10 +
 apis/proto/v1/vald/update.proto | 8 +
 apis/swagger/v1/vald/update.swagger.json | 51 +
 charts/vald-benchmark-operator/README.md | 2 +-
 .../schemas/job-values.yaml | 2 +-
 .../templates/deployment.yaml | 2 +-
 charts/vald-benchmark-operator/values.yaml | 2 +-
 charts/vald/README.md | 11 +-
 charts/vald/values.schema.json | 20 +-
 charts/vald/values.yaml | 2 +-
 codecov.yml => codecov.yaml | 0
 dockers/agent/core/agent/Dockerfile | 11 +-
 dockers/agent/core/faiss/Dockerfile | 12 +-
 dockers/agent/core/ngt/Dockerfile | 12 +-
 dockers/agent/sidecar/Dockerfile | 12 +-
 dockers/binfmt/Dockerfile | 2 +-
 dockers/buildbase/Dockerfile | 2 +-
 dockers/buildkit/Dockerfile | 2 +-
 dockers/ci/base/Dockerfile | 15 +-
 dockers/dev/Dockerfile | 15 +-
 dockers/discoverer/k8s/Dockerfile | 12 +-
 dockers/gateway/filter/Dockerfile | 12 +-
 dockers/gateway/lb/Dockerfile | 12 +-
 dockers/gateway/mirror/Dockerfile | 12 +-
 dockers/index/job/correction/Dockerfile | 12 +-
 dockers/index/job/creation/Dockerfile | 12 +-
 .../index/job/readreplica/rotate/Dockerfile | 12 +-
 dockers/index/job/save/Dockerfile | 12 +-
 dockers/index/operator/Dockerfile | 12 +-
 dockers/manager/index/Dockerfile | 12 +-
 dockers/operator/helm/Dockerfile | 12 +-
 dockers/tools/benchmark/job/Dockerfile | 12 +-
 dockers/tools/benchmark/operator/Dockerfile | 12 +-
 dockers/tools/cli/loadtest/Dockerfile | 12 +-
 docs/contributing/unit-test-guideline.md | 2 +-
 .../observability-configuration.md | 2 +-
 example/client/go.mod | 28 +-
 example/client/go.mod.default | 2 +-
 example/client/go.sum | 28 +-
 go.mod | 214 +-
 go.sum | 472 +--
 hack/cspell/main.go | 349 +++
 hack/cspell/main_test.go | 532 ++++
 hack/docker/gen/main.go | 40 +-
 hack/go.mod.default | 2 +-
 internal/backoff/backoff.go | 16 +-
 internal/backoff/backoff_test.go | 18 +-
 internal/cache/gache/option_test.go | 18 +-
 internal/cache/option.go | 4 +-
 internal/circuitbreaker/breaker.go | 4 +-
 internal/circuitbreaker/breaker_test.go | 54 +-
 internal/circuitbreaker/options.go | 2 +-
 internal/client/v1/client/vald/vald.go | 34 +
 internal/client/v1/client/vald/vald_test.go | 440 +++
 internal/compress/gob_test.go | 6 +-
 internal/compress/lz4_test.go | 8 +-
 internal/config/cassandra_test.go | 2 +-
 internal/config/faiss_test.go | 4 +
 internal/config/log.go | 2 +-
 internal/core/algorithm/ngt/ngt_test.go | 16 +-
 internal/db/rdb/mysql/dbr/dbr.go | 2 +-
 internal/db/rdb/mysql/dbr/insert.go | 4 +-
 internal/db/rdb/mysql/dbr/session.go | 4 +-
 internal/db/rdb/mysql/dbr/tx.go | 2 +-
 internal/db/rdb/mysql/mysql_test.go | 22 +-
 internal/db/rdb/mysql/option.go | 4 +-
 .../db/storage/blob/cloudstorage/option.go | 2 +-
 internal/db/storage/blob/s3/reader/option.go | 2 +-
 internal/db/storage/blob/s3/s3_test.go | 2 +-
 .../storage/blob/s3/session/session_test.go | 4 +-
 internal/errors/agent.go | 46 +
 internal/errors/agent_test.go | 1405 ++++++++-
 internal/errors/corrector.go | 5 +-
 internal/errors/grpc.go | 2 +-
 internal/errors/net.go | 2 +-
 internal/errors/ngt.go | 29 -
 internal/errors/ngt_test.go | 1405 ---------
 internal/errors/option_test.go | 10 +-
 internal/errors/redis.go | 2 +-
 internal/errors/redis_test.go | 36 +-
 internal/errors/tls.go | 4 +-
 internal/errors/vald.go | 2 +-
 internal/info/info.go | 2 +-
 internal/log/level/level.go | 52 +-
 internal/log/option_test.go | 6 +-
 internal/net/dialer_test.go | 2 +-
 .../grpc/interceptor/client/metric/metric.go | 4 +-
 .../grpc/interceptor/server/metric/metric.go | 4 +-
 internal/net/http/json/json_test.go | 14 +-
 internal/params/params_test.go | 4 +-
 internal/servers/server/option_test.go | 765 +++++
 internal/tls/tls.go | 2 +-
 internal/worker/queue.go | 6 +-
 internal/worker/queue_option.go | 2 +-
 k8s/metrics/loki/promtail.yaml | 4 +-
 pkg/agent/core/faiss/service/faiss.go | 107 +-
 pkg/agent/core/faiss/service/faiss_test.go | 1227 +++++++-
 .../core/ngt/handler/grpc/object_test.go | 2 +-
 pkg/agent/core/ngt/handler/grpc/update.go | 97 +
 .../core/ngt/handler/grpc/update_test.go | 129 +
 pkg/agent/core/ngt/service/ngt.go | 351 +--
 pkg/agent/core/ngt/service/ngt_test.go | 1144 +++++--
 pkg/agent/internal/kvs/kvs.go | 10 +-
 pkg/agent/internal/kvs/kvs_test.go | 32 +-
 pkg/agent/internal/memstore/data_manager.go | 217 ++
 .../internal/memstore/data_manager_test.go | 493 +++
 pkg/agent/internal/vqueue/queue.go | 227 +-
 pkg/agent/internal/vqueue/queue_test.go | 1035 ++++++-
 pkg/agent/internal/vqueue/stateful_test.go | 12 +-
 pkg/gateway/lb/handler/grpc/handler.go | 451 +--
 pkg/gateway/lb/handler/grpc/handler_test.go | 274 ++
 pkg/gateway/lb/service/gateway.go | 7 +-
 pkg/gateway/mirror/handler/grpc/handler.go | 10 +-
 pkg/gateway/mirror/service/mirror.go | 26 +-
 pkg/gateway/mirror/service/mirror_option.go | 4 +-
 pkg/gateway/mirror/service/mirror_test.go | 144 +-
 pkg/gateway/mirror/usecase/vald.go | 8 +-
 pkg/index/job/correction/service/corrector.go | 788 +++--
 .../job/correction/service/corrector_test.go | 781 +++++
 pkg/index/job/correction/usecase/corrector.go | 4 +-
 .../job/readreplica/rotate/service/rotator.go | 4 +-
 pkg/index/operator/service/operator.go | 6 +-
 pkg/manager/index/service/indexer.go | 4 +-
 pkg/tools/benchmark/job/config/config.go | 2 +-
 .../benchmark/operator/service/operator.go | 4 +-
 .../operator/service/operator_test.go | 2 +-
 rust/Cargo.lock | 180 +-
 rust/bin/agent/src/handler/index.rs | 2 +-
 rust/bin/agent/src/handler/update.rs | 8 +
 rust/libs/proto/src/core.v1.tonic.rs | 25 +-
 rust/libs/proto/src/discoverer.v1.tonic.rs | 25 +-
 rust/libs/proto/src/filter.egress.v1.tonic.rs | 28 +-
 .../libs/proto/src/filter.ingress.v1.tonic.rs | 28 +-
 rust/libs/proto/src/mirror.v1.tonic.rs | 23 +-
 rust/libs/proto/src/payload.v1.rs | 62 +-
 rust/libs/proto/src/rpc.v1.rs | 2 +-
 rust/libs/proto/src/sidecar.v1.tonic.rs | 22 +-
 rust/libs/proto/src/vald.v1.tonic.rs | 342 +--
 rust/rust-toolchain | 2 +-
 rust/rust-toolchain.toml | 2 +-
 tests/chaos/chart/README.md | 2 +-
 tests/e2e/crud/crud_test.go | 4 +-
 .../agent/core/ngt/service/ngt_e2s_test.go | 4 +-
 versions/CHAOS_MESH_VERSION | 2 +-
 versions/CMAKE_VERSION | 2 +-
 versions/DOCKER_VERSION | 2 +-
 versions/GOLANGCILINT_VERSION | 2 +-
 versions/HELM_VERSION | 2 +-
 versions/KIND_VERSION | 2 +-
 versions/KUBECTL_VERSION | 2 +-
 versions/PROMETHEUS_STACK_VERSION | 2 +-
 versions/PROTOBUF_VERSION | 2 +-
 versions/RUST_VERSION | 2 +-
 versions/VALDCLI_VERSION | 1 -
 versions/actions/ACTIONS_UPLOAD_ARTIFACT | 2 +-
 versions/actions/GITHUB_CODEQL_ACTION_ANALYZE | 2 +-
 .../actions/GITHUB_CODEQL_ACTION_AUTOBUILD | 2 +-
 versions/actions/GITHUB_CODEQL_ACTION_INIT | 2 +-
 .../actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF | 2 +-
 versions/actions/GITHUB_ISSUE_METRICS | 2 +-
 .../actions/PETER_EVANS_CREATE_PULL_REQUEST | 2 +-
 .../SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET | 2 +-
 243 files changed, 16081 insertions(+), 5868 deletions(-)
 rename .github/{dependabot.yml => dependabot.yaml} (100%)
 rename .github/{labeler.yml => labeler.yaml} (100%)
 rename .github/workflows/{_detect-ci-container.yml => _detect-ci-container.yaml} (100%)
 rename .github/workflows/{_release-pr.yml => _release-pr.yaml} (99%)
 rename .github/workflows/{backport.yml => backport.yaml} (100%)
 rename .github/workflows/{build-binaries.yml => build-binaries.yaml} (97%)
 rename .github/workflows/{build-protobuf.yml => build-protobuf.yaml} (96%)
 rename .github/workflows/{chatops-help.yml => chatops-help.yaml} (100%)
 rename .github/workflows/{chatops.yml => chatops.yaml} (100%)
 rename .github/workflows/{check-conflict.yml => check-conflict.yaml} (98%)
 rename .github/workflows/{codeql-analysis.yml => codeql-analysis.yaml} (95%)
 rename .github/workflows/{coverage.yml => coverage.yaml} (93%)
 rename .github/workflows/{detect-internal-config-changes.yml => detect-internal-config-changes.yaml} (100%)
 rename .github/workflows/{dockers-image-scan.yml => dockers-image-scan.yaml} (100%)
 rename .github/workflows/{e2e-max-dim.yml => e2e-max-dim.yaml} (98%)
 rename .github/workflows/{e2e-profiling.yml => e2e-profiling.yaml} (99%)
 rename .github/workflows/{e2e.yml => e2e.yaml} (99%)
 rename .github/workflows/{format.yml => format.yaml} (98%)
 rename .github/workflows/{fossa.yml => fossa.yaml} (98%)
 rename .github/workflows/{helm-lint.yml => helm-lint.yaml} (98%)
 rename .github/workflows/{helm.yml => helm.yaml} (98%)
 rename .github/workflows/{labeler.yml => labeler.yaml} (99%)
 rename .github/workflows/{release.yml => release.yaml} (100%)
 rename .github/workflows/{reviewdog-hadolint.yml => reviewdog-hadolint.yaml} (100%)
 rename .github/workflows/{reviewdog-k8s.yml => reviewdog-k8s.yaml} (98%)
 rename .github/workflows/{reviewdog-markdown.yml => reviewdog-markdown.yaml} (100%)
 rename .github/workflows/{reviewdog.yml => reviewdog.yaml} (94%)
 rename .github/workflows/{test-hack.yml => test-hack.yaml} (94%)
 rename .github/workflows/{update-pull-request-and-issue-template.yml => update-pull-request-and-issue-template.yaml} (100%)
 rename .github/workflows/{update-web-docs.yml => update-web-docs.yaml} (100%)
 delete mode 100644 Makefile.d/client.mk
 rename codecov.yml => codecov.yaml (100%)
 create mode 100644 hack/cspell/main.go
 create mode 100644 hack/cspell/main_test.go
 create mode 100644 pkg/agent/internal/memstore/data_manager.go
 create mode 100644 pkg/agent/internal/memstore/data_manager_test.go
 delete mode 100644 versions/VALDCLI_VERSION
diff --git a/.cspell.json b/.cspell.json
index 71ced17ccf..fd2e1e380d 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -1,226 +1,2744 @@
 { "version": "0.2", "language": "en", - "words": [ - "ACCESSS", + "import": [ + "@cspell/dict-cpp/cspell-ext.json", + "@cspell/dict-docker/cspell-ext.json", + "@cspell/dict-en_us/cspell-ext.json", + "@cspell/dict-fullstack/cspell-ext.json", + "@cspell/dict-git/cspell-ext.json", + "@cspell/dict-golang/cspell-ext.json", + "@cspell/dict-k8s/cspell-ext.json", + "@cspell/dict-makefile/cspell-ext.json", + "@cspell/dict-markdown/cspell-ext.json", + "@cspell/dict-npm/cspell-ext.json", + "@cspell/dict-public-licenses/cspell-ext.json", + "@cspell/dict-rust/cspell-ext.json", + "@cspell/dict-shell/cspell-ext.json" + ], + "ignorePaths": [ + "**/*.ai", + "**/*.drawio", + "**/*.hdf5", + "**/*.key", + "**/*.lock", + "**/*.log", + "**/*.md5", + "**/*.pack", + "**/*.pdf", + "**/*.pem", + "**/*.png", + "**/*.sum", + "**/*.svg",
+ "**/.cspell.json", + "**/.git/objects/**", + "**/cmd/agent/core/faiss/faiss", + "**/cmd/agent/core/ngt/ngt", + "**/cmd/agent/sidecar/sidecar", + "**/cmd/discoverer/k8s/discoverer", + "**/cmd/gateway/filter/filter", + "**/cmd/gateway/lb/lb", + "**/cmd/gateway/mirror/mirror", + "**/cmd/index/job/correction/index-correction", + "**/cmd/index/job/creation/index-creation", + "**/cmd/index/job/readreplica/rotate/readreplica-rotate", + "**/cmd/index/job/save/index-save", + "**/cmd/index/operator/index-operator", + "**/cmd/manager/index/index", + "**/cmd/tools/benchmark/job/job", + "**/cmd/tools/benchmark/operator/operator", + "**/cmd/tools/cli/loadtest/loadtest", + "**/hack/cspell/**", + "**/internal/core/algorithm/ngt/assets/index", + "**/internal/test/data/agent/ngt/validIndex" + ], + "patterns": [ + { + "name": "Ignore_addr_suffix", + "pattern": "/\\b\\w*addr\\b/" + }, + { + "name": "Ignore_addrs_suffix", + "pattern": "/\\b\\w*addrs\\b/" + }, + { + "name": "Ignore_buf_suffix", + "pattern": "/\\b\\w*buf\\b/" + }, + { + "name": "Ignore_cancel_suffix", + "pattern": "/\\b\\w*cancel\\b/" + }, + { + "name": "Ignore_cfg_suffix", + "pattern": "/\\b\\w*cfg\\b/" + }, + { + "name": "Ignore_ch_suffix", + "pattern": "/\\b\\w*ch\\b/" + }, + { + "name": "Ignore_cnt_suffix", + "pattern": "/\\b\\w*cnt\\b/" + }, + { + "name": "Ignore_conf_suffix", + "pattern": "/\\b\\w*conf\\b/" + }, + { + "name": "Ignore_conn_suffix", + "pattern": "/\\b\\w*conn\\b/" + }, + { + "name": "Ignore_ctx_suffix", + "pattern": "/\\b\\w*ctx\\b/" + }, + { + "name": "Ignore_dim_suffix", + "pattern": "/\\b\\w*dim\\b/" + }, + { + "name": "Ignore_dur_suffix", + "pattern": "/\\b\\w*dur\\b/" + }, + { + "name": "Ignore_env_suffix", + "pattern": "/\\b\\w*env\\b/" + }, + { + "name": "Ignore_err_suffix", + "pattern": "/\\b\\w*err\\b/" + }, + { + "name": "Ignore_error_suffix", + "pattern": "/\\b\\w*error\\b/" + }, + { + "name": "Ignore_errors_suffix", + "pattern": "/\\b\\w*errors\\b/" + }, + { + "name": "Ignore_errs_suffix", + "pattern": "/\\b\\w*errs\\b/" + }, + { + "name": "Ignore_idx_suffix", + "pattern": "/\\b\\w*idx\\b/" + }, + { + "name": "Ignore_len_suffix", + "pattern": "/\\b\\w*len\\b/" + }, + { + "name": "Ignore_mu_suffix", + "pattern": "/\\b\\w*mu\\b/" + }, + { + "name": "Ignore_opt_suffix", + "pattern": "/\\b\\w*opt\\b/" + }, + { + "name": "Ignore_opts_suffix", + "pattern": "/\\b\\w*opts\\b/" + }, + { + "name": "Ignore_pool_suffix", + "pattern": "/\\b\\w*pool\\b/" + }, + { + "name": "Ignore_req_suffix", + "pattern": "/\\b\\w*req\\b/" + }, + { + "name": "Ignore_res_suffix", + "pattern": "/\\b\\w*res\\b/" + }, + { + "name": "Ignore_size_suffix", + "pattern": "/\\b\\w*size\\b/" + }, + { + "name": "Ignore_vec_suffix", + "pattern": "/\\b\\w*vec\\b/" + } + ], + "ignoreRegExpList": [ + "Ignore_addr_suffix", + "Ignore_addrs_suffix", + "Ignore_buf_suffix", + "Ignore_cancel_suffix", + "Ignore_cfg_suffix", + "Ignore_ch_suffix", + "Ignore_cnt_suffix", + "Ignore_conf_suffix", + "Ignore_conn_suffix", + "Ignore_ctx_suffix", + "Ignore_dim_suffix", + "Ignore_dur_suffix", + "Ignore_env_suffix", + "Ignore_err_suffix", + "Ignore_error_suffix", + "Ignore_errors_suffix", + "Ignore_errs_suffix", + "Ignore_idx_suffix", + "Ignore_len_suffix", + "Ignore_mu_suffix", + "Ignore_opt_suffix", + "Ignore_opts_suffix", + "Ignore_pool_suffix", + "Ignore_req_suffix", + "Ignore_res_suffix", + "Ignore_size_suffix", + "Ignore_vec_suffix" + ], + "ignoreWords": [ "AQUASECURITY", - "AUTOBUILD", + "Addrs", + "Atof", + "Atol", + "Autoscaler", + "BINFMT", + "BUILDBASE", + 
"BUILDKIT", "BUILDX", - "Burstable", + "Bbolt", + "Buildx", + "CAPI", + "CHATOPS", "Capi", + "DISTROLESS", + "DNSA", + "Debugd", + "Debugf", + "Devcontainer", + "EUCJP", + "Errord", "Errorf", + "Eucjp", "FAISS", + "FASTOPEN", + "Faiss", + "Fatald", + "Fnum", + "GACHE", + "GETOBJECT", "GHACTION", + "GOARCH", + "GOBIN", "GOLANGCILINT", - "GOMAXPROCS", - "Godoc", + "GOLINES", + "GOPATH", + "GOPKG", + "GOPRIVATE", + "GOROOT", + "GOTEST", + "GOTESTS", + "Gache", + "Gocqlx", + "Gofumpt", + "Goleak", + "IDRPC", + "INITCONTAINER", + "Idxs", + "Iface", + "Indegree", + "Infod", "Infof", + "Inuse", + "Jaccard", + "KEEPIDLE", + "KEYSPACE", + "KLOG", + "KUBECONFIG", "KUBELINTER", + "KVSDB", + "Keyspace", + "Kvsdb", "LANGUAGETOOL", - "Milli", + "LDFLAGS", + "LOADTEST", + "LOGRUS", + "LOGRUs", + "MNIST", + "Mallocs", + "Mirr", + "Nbits", + "Nocie", "ONNX", - "OTEL", - "PROTOBUF", + "Oneof", + "Outdegree", + "Outf", + "PORTFORWARD", + "Portforward", + "Prost", + "Ptop", + "Pyroscope", + "QUICKACK", + "RDONLY", + "READREPLICA", + "RECVORIGDSTADDR", "REVIEWDOG", - "Roundtripper", + "ROOTDIR", + "RUSTUP", + "Readreplica", + "Rebalance", + "Regist", + "Represets", + "Retryable", + "Reviewdog", + "Rootdir", + "Ruleguard", "SARIF", - "SOFTPROPS", - "Structs", + "SYFT", + "Sjis", + "Stmts", + "Struct", + "Svcs", "TELEPRESENCE", + "TEXTLINT", + "TMPDIR", + "Tgts", + "Tolerations", + "UPSERT", + "Unmarshal", + "Upsert", "VALD", - "VALDCLI", + "VALDRELEASE", + "VECTORIZER", "VHOR", "Vald", + "Vald's", + "Vals", + "Vecs", "Vectorizer", + "Vqueue", + "Warnd", + "Warnf", + "Wrapf", + "ZEROLOG", + "ZSTD", + "Zstd", + "accesskey", "accesslog", - "achive", + "adal", "addrs", + "adipisicing", + "afero", + "aggr", + "ajstarks", + "aknishid", + "akrylysov", + "aliqua", + "aliquip", + "alives", + "amet", + "amqp", + "ando", + "antihax", + "anypb", + "apiextensions", + "apimachinery", "apiserver", - "attirbute", + "appengine", + "aquasecurity", + "armon", + "astcopy", + "astequal", + "atof", + "atol", + "atot", + "aute", + "autobuild", + "autoclean", + "automaxprocs", + "autoremove", + "autorest", + "autoscaler", + "azcore", + "azidentity", + "backoffmetrics", + "batchv", "bbolt", - "boudary", - "brandguidelines", + "bdbs", + "benbjohnson", + "benchjob", + "benchjobs", + "benchmem", + "benchscenario", + "bento", + "beorn", + "bigann", + "binfmt", + "bjns", + "blackfriday", + "bmizerany", + "boolint", + "boombuler", + "brnd", + "buckhash", + "bufbuild", + "buger", "buildbase", + "buildkit", + "buildx", + "bulkinsert", + "burstable", + "bvecs", + "bytefmt", + "bzrignore", + "canceld", + "capi", + "cbmetrics", + "cenkalti", + "cespare", "chatops", - "chrono", + "chunkreader", + "chzyer", + "cillum", "circuitbreaker", + "clientip", + "clientmock", + "clientset", + "cloudfoundry", + "cloudsql", "cloudstorage", "clusterrole", "clusterrolebinding", + "clusterrolebindings", + "clusterroles", + "cmder", + "cmdflag", + "cmps", + "cnts", + "cockroachdb", + "codegen", + "collatz", + "colorstring", + "commandhistory", + "commodo", + "concurency", "configmap", + "configsources", + "conflint", + "consequat", + "consistetncy", "contributorsrc", "conv", + "copylocks", + "corev", + "cpuguy", + "cpuid", "crds", + "creack", + "createandsave", "crlfmt", + "crorg", + "cstring", "ctxio", + "cupidatat", + "customresourcedefinitions", + "cvspq", "daemonset", + "dataspace", + "datelier", + "davecgh", + "dbuild", + "dbus", + "dcmake", + "deafult", + "debg", + "debugd", + "debugf", + "decbytes", + "deepcopy", "deepsource", + "dejavu", + 
"deletecollection", + "deleter", + "demangle", + "denisenkom", + "deserunt", "devcontainer", + "devcontainers", "devel", + "devigned", + "dgryski", + "dicoverer", + "difflib", + "diskv", + "distroless", + "dnaeon", + "dnsa", + "dockerfiles", + "dolore", + "dotdc", + "dotproduct", + "dpkg", + "dset", + "duis", + "durationpb", + "dylib", + "easyjson", + "ecrud", + "eiusmod", + "elif", + "elit", + "emap", + "emicklei", + "enim", + "envkey", "envoyproxy", + "eoptions", + "errcheck", "errdetails", "errgroup", - "facebookresearch", + "errgroup", + "errord", + "errorf", + "errorln", + "esac", + "eucjp", + "evanphx", + "eventstream", + "excepteur", + "exhaustruct", + "extendee", + "extldflags", + "faild", "faiss", "fastime", + "fastopen", + "fastuuid", + "fatald", + "felixge", + "fgprof", + "finalizer", + "finalizers", + "firestore", + "fitos", + "flamegraph", + "flot", + "fmap", + "fname", + "fnum", + "fogleman", + "fopenmp", + "fortytw", + "fpdf", + "frankban", + "freelist", + "freetype", + "fsnotify", + "fugiat", "fuid", + "funakoshi", + "fvecs", "gache", + "gbackoff", + "gcsblob", + "genproto", "getobject", "getstarted", + "gfortran", + "ghaction", + "ghcrorg", "gitfiles", + "glfw", + "glog", + "goarch", + "goautoneg", + "gobc", + "gobin", + "gobwas", + "gocache", + "goccy", + "gocloud", + "gocql", + "gocqlx", + "gocraft", + "godbus", + "godebug", + "godeltaprof", + "godoc", + "gofpdf", + "gofpdi", + "gofrs", "gofumpt", + "gofuzz", + "gogrep", "goimports", + "gojsonpointer", + "gojsonreference", + "gojsonschema", "golangci", + "golangcilint", "goleak", "golines", - "gongt", + "gomaxprocs", + "gomega", + "gomnd", + "gomodifytags", + "gomodules", + "gonic", "gonum", + "gopath", + "gopkg", + "gopls", + "goprivate", + "goproxy", + "gopter", + "goroot", + "goroutines", "gorules", + "gostub", "gotest", + "gotestfmt", "gotests", "gotmpl", + "gotool", + "gover", + "govet", + "gpgsign", + "graphviz", + "gregjones", + "grimaud", + "groundtruth", + "groupcache", + "grpclog", + "grpcmock", + "grpcreplay", "hadolint", + "hailocab", + "hanwen", + "healthcheck", + "healthz", "helmignore", - "httputil", - "icfg", + "hgignore", + "hiroto", + "hlts", + "hoge", + "honnef", + "horizontalpodautoscalers", + "hostport", + "hrichik", + "hrichiksite", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "iancoleman", + "ianlancetaylor", + "iblob", + "iconfig", + "idelay", + "idrpc", + "idxs", + "iface", + "igmp", + "imds", + "incididunt", + "inconshreveable", + "indegree", + "indexmapkey", + "indexmapvalue", + "infod", + "infof", + "infoln", + "infometrics", + "ingester", "initcontainer", + "initdb", + "inmemory", "innerproduct", - "ioutil", + "insuffcient", + "inuse", + "irure", + "isatty", + "ivecs", "jaccard", - "japansearch", + "jackc", + "jaegertracing", + "jessevdk", + "jitted", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jrnlw", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "junsei", + "kadowaki", + "kato", + "katz", + "kbps", + "keepalive", + "keepalives", + "keepidle", + "kevindiu", + "keyspace", + "kiichiro", + "kisielk", + "klauspost", + "klog", + "kmrmt", + "koichi", "kosarak", + "kosuke", "kpango", + "kprofefe", + "kubeconfig", "kubelinter", - "kustomization", + "kubeval", "kvsdb", + "kvstore", + "kvvk", + "kylelemons", + "labelmap", + "labore", + "laboris", + "laborum", + "languagetool", + "ldconfig", + "ldflags", + "leaktest", + "leanovate", + "lenmapkey", + "lenmapvalue", + "leodido", + "lfaiss", + "libaec", "libhdf", + "liblapack", + "libomp", + "libopenblas", + 
"lifecycler", + "liggitt", + "liusy", + "livenesss", + "lngt", "loadbalancer", "loadtest", + "localserial", + "localtime", + "logex", + "logfmt", + "logr", + "logrus", + "ltsv", + "lucasb", "lycorp", + "mailru", + "mallocs", + "mapkey", + "mapvalue", + "mattn", + "matttproud", "maxprocs", - "minio", + "mazumder", + "mcache", + "memstats", + "mertics", + "metas", + "metav", + "metricinterceptor", + "mfridman", + "miette", + "minburst", + "mirr", + "misscheduled", + "mitchellh", + "mktemp", + "mktmp", "mnist", + "mnode", + "moby", + "modocache", + "mollit", + "monochromegane", + "montanaflynn", + "morimoto", + "mountinfo", + "mpod", + "mspan", + "mssqldb", + "mthe", "multiapis", "multicluster", - "nanos", + "munnerz", + "nang", "nbits", - "networkpolicy", + "ncos", + "neighors", + "networkpolicies", + "ngroup", + "ngtd", + "nhooyr", + "niemeyer", + "nindent", "nlist", + "nobic", + "nocie", + "nogce", + "nolint", + "noninteractive", + "nonroot", + "nopvq", "normalizedangle", "normalizedcosine", + "normalizedl", + "normang", + "normcos", + "norml", "nosql", + "nostrud", + "notests", + "npoints", + "ntotal", + "nulla", + "nvim", + "nvimlog", + "nxadm", "nytimes", + "objc", + "objs", + "objx", + "occaecat", + "ocsql", + "officia", + "okamura", "oneof", "onnx", - "otel", - "otlp", + "onsi", + "opencensus", + "opencontainers", + "osdk", + "ospace", + "otelgrpc", + "otlpmetric", + "otlpmetricgrpc", + "otlptrace", + "otlptracegrpc", + "outdegree", + "outf", + "pariatur", + "pbdocs", + "pbgos", + "peakrate", + "persistentvolumeclaims", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgroup", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pipefail", + "pipeliner", + "planetscale", + "pmezard", + "poddisruptionbudgets", + "podname", "pogreb", + "poinc", + "pointradius", "portforward", "pprof", - "priorityclass", + "prashantv", + "preriodically", + "priorityclasses", "profefe", + "progressbar", + "proident", "promtail", "prost", "protobuf", "protoc", + "protocolbuffers", + "protohelpers", + "protoimpl", + "protojson", + "protoreflect", "protos", + "protovalidate", + "pstartf", + "pstopf", + "ptop", "pyroscope", + "quantizer", + "quasilyte", + "queryx", + "quickack", + "quicktest", + "quis", + "ratelimit", + "rdonly", "readreplica", + "readyz", "rebalance", "rebalancing", + "recvorigdstaddr", + "regist", + "registerers", + "replayers", "replicasets", + "repr", + "reprehenderit", + "represets", "rerank", - "retrive", + "resered", + "retryable", "reviewdog", + "rgba", + "rintaro", + "rinx", + "roccd", + "rogpeppe", + "rolebindings", + "rootdir", + "roundtripper", + "rpcs", + "ruleguard", + "russross", "rustc", "rustup", - "serviceaccount", + "ruudk", + "sarif", + "saveindex", + "sbinet", + "schollz", + "scylladb", + "secretkey", + "secretmanager", + "semconv", + "sendemail", + "sergi", + "serversscheme", + "serviceaccounts", + "sess", + "sgroup", + "shiraishi", + "shlex", + "shmem", + "shogo", + "shopspring", + "shurcoo", + "signingkey", + "signoff", "singleflight", + "sint", + "sirupsen", + "siyuan", + "sjis", + "skipcq", + "smallscreen", + "snapshotter", + "snapshotv", + "softprops", + "sparsejaccard", + "spdystream", + "spjac", + "sptag", + "sqlexp", + "sqlmock", + "sqlx", + "srvs", + "sspan", + "stackdriver", + "starlark", "statefulset", - "steamsearch", + "statefulsets", + "staticcheck", + "stdinfo", + "stdr", + "stix", + "stmts", + "stockout", + "stoewer", + "storageclass", + "stos", + "strcase", "streaminsert", + "stretchr", + "strg", + 
"strictgoimports", + "strparse", + "struct", "structs", + "subquantizers", + "substr", "subtests", - "testdata", - "textlintrc", + "sunt", + "svcs", + "syft", + "tabwriter", + "tada", + "tagalign", + "taisuou", + "takuyaymd", + "tcql", + "technote", + "telepresence", + "tempor", + "testfunc", + "textlint", + "tgts", + "thedrow", + "threadcreate", + "timelimit", + "timepicker", "timeutil", + "tlsca", + "tmpdir", + "tmpfs", + "tmpl", + "tmproj", + "tolerations", + "tonistiigi", + "toolsmith", + "tparse", + "traceinterceptor", "traefik", + "treeprint", + "trunc", + "typeparams", + "tzdata", + "udpa", + "ugorji", + "ullamco", + "ultiple", + "unbackupped", + "unixgram", + "unixpacket", + "unmarshal", + "unparam", "upsert", "upserted", "urlopener", "usecase", + "usecases", + "userdefined", "vald", + "vald's", "valdbenchmarkjob", + "valdbenchmarkjobs", + "valdbenchmarkoperator", "valdbenchmarkoperatorrelease", + "valdbenchmarkoperatorreleases", "valdbenchmarkscenario", + "valdbenchmarkscenarios", + "valdchart", + "valdcli", "valdhelmoperatorrelease", - "valdhelmopratorreleases", + "valdhelmoperatorreleases", "valdmirrortarget", "valdmirrortargets", + "valdname", "valdrelease", + "valdreleases", + "vals", + "vankichi", + "vbjs", "vbor", + "vbors", + "vbss", "vdaas", "vdctl", + "vecs", "vectorizer", - "vectorizing", + "velit", + "veniam", + "versin", + "vfsgen", "vhor", + "vhors", + "vmap", + "vmdata", + "vmexe", + "vmhwm", + "vmlck", + "vmlib", + "vmpeak", + "vmpin", + "vmpte", + "vmrss", + "vmstk", + "vmswap", + "volumesnapshot", + "volumesnapshots", + "voluptate", + "voronoi", "vqueue", "vtproto", - "werr", + "warnd", + "warnf", + "warningf", + "warningln", + "wfci", "whitesource", + "wiretype", + "wlhf", + "workdir", + "worktree", + "wrapf", + "wrapperspb", + "xaxis", + "xeipuuv", + "xids", + "xlab", + "xxhash", + "xzvf", "yahoojapan", "yamlfmt", + "yaxes", + "yaxis", + "ykadowak", + "yukawa", + "yusuke", + "zapr", + "zchee", + "zdtd", + "zeebo", + "zerolog", + "zoneinfo", "zstd" ], - "ignoreWords": [ - "CMYK", - "SHOGO", - "TECHNOTE", - "agentd", - "benchmarkd", - "conflint", - "sidecard" - ], - "dictionaries": [ - "softwareTerms", - "misc", - "companies", - "typescript", - "node", - "html", - "css", - "fonts", - "filetypes", - "npm" - ], - "ignorePaths": [ - "**/*.ai", - "**/*.drawio", - "**/*.hdf5", - "**/*.key", - "**/*.lock", - "**/*.log", - "**/*.md5", - "**/*.pack", - "**/*.pdf", - "**/*.pem", - "**/*.png", - "**/*.sum", - "**/*.svg", - "**/.git/objects/**", - "**/cmd/agent/core/faiss/faiss", - "**/cmd/agent/core/ngt/ngt", - "**/cmd/agent/sidecar/sidecar", - "**/cmd/discoverer/k8s/k8s", - "**/cmd/gateway/filter/filter", - "**/cmd/gateway/lb/lb", - "**/cmd/gateway/mirror/mirror", - "**/cmd/index/job/correction/correction", - "**/cmd/index/job/creation/creation", - "**/cmd/index/job/readreplica/rotate/rotate", - "**/cmd/index/job/save/save", - "**/cmd/manager/index/index", - "**/internal/core/algorithm/ngt/assets/index", - "**/internal/test/data/agent/ngt/validIndex" - ] + "ignoreWordsMap": { + ".all-contributorsrc": [ + "Funakoshi", + "Grimaud", + "Hiroto", + "Hrichik", + "Kadowaki", + "Kato", + "Katz", + "Kiichiro", + "Koichi", + "Kosuke", + "Mazumder", + "Morimoto", + "Okamura", + "Rintaro", + "Shiraishi", + "Siyuan", + "YUKAWA", + "Yusuke", + "aknishid", + "ando", + "datelier", + "dotdc", + "hrichiksite", + "junsei", + "kevindiu", + "liusy", + "pgrimaud", + "taisuou", + "takuyaymd", + "thedrow", + "ykadowak", + "zchee" + ], + ".commit_template": ["bento", "tada"], + 
".devcontainer/devcontainer.json": [ + "PTRACE", + "commandhistory", + "seccomp", + "zshhistory" + ], + ".devcontainer/postAttachCommand.sh": ["commandhistory"], + ".fossa.yml": ["vdctl"], + ".git/hooks/applypatch-msg.sample": ["commitmsg"], + ".git/hooks/fsmonitor-watchman.sample": [ + "CHLD", + "binmode", + "clockid", + "msys" + ], + ".git/hooks/pre-commit.sample": ["allownonascii"], + ".git/hooks/pre-rebase.sample": ["Hamano", "Junio", "oneline"], + ".git/hooks/pre-receive.sample": ["echoback"], + ".git/hooks/push-to-checkout.sample": ["behaviour"], + ".git/hooks/sendemail-validate.sample": [ + "SENDEMAIL", + "Worktree", + "sendemail", + "worktree" + ], + ".git/hooks/update.sample": [ + "allowdeletetag", + "allowmodifytag", + "allowunannotated", + "newrev", + "oldrev", + "projectdesc" + ], + ".gitattributes": ["contributorsrc"], + ".gitfiles": [ + "CMYK", + "PROTOBUF", + "SHOGO", + "SOFTPROPS", + "TECHNOTE", + "accesslog", + "agentd", + "benchmarkd", + "brandguidelines", + "clusterrolebinding", + "conflint", + "contributorsrc", + "conv", + "darkgray", + "getstarted", + "gitfiles", + "gongt", + "helmignore", + "kosarak", + "kustomization", + "lycorp", + "multicluster", + "networkpolicy", + "nytimes", + "priorityclass", + "promtail", + "serviceaccount", + "sidecard", + "testdata", + "textlintrc", + "tmpl", + "valdmirrortarget", + "vdctl", + "whitesource" + ], + ".github/actions/docker-build/action.yaml": ["opencontainers"], + ".github/actions/notify-slack/action.yaml": ["technote"], + ".github/chatops_commands.md": ["bento"], + ".github/chatops_permissions.yaml": ["datelier", "kevindiu"], + ".github/conflint.yaml": ["kubeval"], + ".github/dependabot.yml": ["gomod"], + ".github/helm/values/values-agent-sidecar.yaml": ["ACCESSKEY", "SECRETKEY"], + ".github/helm/values/values-chaos.yaml": ["serversscheme"], + ".github/helm/values/values-profile.yaml": ["GOMAXPROCS"], + ".github/helm/values/values-readreplica.yaml": ["snapclass"], + ".github/workflows/_docker-image-scan.yaml": [ + "imagename", + "opencontainers" + ], + ".github/workflows/_docker-image.yaml": [ + "DOCKERHUB", + "buildkitd", + "stargz" + ], + ".github/workflows/backport.yml": ["startswith"], + ".github/workflows/build-binaries.yml": ["shogo"], + ".github/workflows/chatops.yml": ["gentest"], + ".github/workflows/detect-internal-config-changes.yml": [ + "INTCFG", + "interal" + ], + ".github/workflows/dockers-gateway-mirror-image.yaml": ["nirror"], + ".github/workflows/e2e-chaos.yaml": ["clusterwide"], + ".github/workflows/e2e-max-dim.yml": ["readlink"], + ".github/workflows/e2e-profiling.yml": ["threadcreate"], + ".github/workflows/fossa.yml": ["urllib"], + ".github/workflows/labeler.yml": ["shortstat"], + ".github/workflows/release.yml": ["goproxy", "softprops"], + ".github/workflows/reviewdog-hadolint.yml": ["Dockerfiles"], + ".github/workflows/reviewdog-k8s.yml": ["CONFLINT", "conflint", "kubeval"], + ".github/workflows/reviewdog-markdown.yml": ["testlint"], + ".github/workflows/test-hack.yml": ["notests", "smallscreen"], + ".github/workflows/unit-test.yaml": ["notests", "smallscreen"], + ".gitignore": ["MSVC", "dylib", "nvim", "nvimlog", "rustc", "rustfmt"], + ".golangci.yml": [ + "asasalint", + "asciicheck", + "bidichk", + "bodyclose", + "contextcheck", + "copylocks", + "cyclop", + "decorder", + "depguard", + "dupl", + "dupword", + "durationcheck", + "errcheck", + "errchkjson", + "errname", + "errorlint", + "execinquery", + "exhaustruct", + "exportloopref", + "forbidigo", + "forcetypeassert", + "ginkgolinter", + 
"gocheckcompilerdirectives", + "gochecknoglobals", + "gochecknoinits", + "gocognit", + "goconst", + "gocritic", + "gocyclo", + "godox", + "gofmt", + "goheader", + "gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "gosmopolitan", + "govet", + "importas", + "ineffassign", + "interfacebloat", + "ireturn", + "loggercheck", + "logrlint", + "makezero", + "megacheck", + "musttag", + "nakedret", + "nestif", + "nilnil", + "nlreturn", + "nolintlint", + "nonamedreturns", + "nosprintfhostport", + "paralleltest", + "prealloc", + "predeclared", + "promlinter", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "stylecheck", + "testableexamples", + "testpackage", + "thelper", + "tparallel", + "unconvert", + "unparam", + "usestdlibvars", + "vetshadow", + "wastedassign", + "wrapcheck", + "wslissues" + ], + ".prh.yaml": [ + "Burstable", + "Flamegraph", + "besteffort", + "burstable", + "documentaion", + "flamegraph", + "valdcli" + ], + ".textlintrc": [ + "idrequest", + "mevie", + "rerank", + "sptag", + "subtest", + "vektor" + ], + "CHANGELOG.md": [ + "CFLAGS", + "CXXFLAGS", + "Cellebration", + "Dockerfiles", + "Metas", + "Migratation", + "OSDK", + "Stackdriver", + "Tutotial", + "alogrithm", + "ando", + "apiversion", + "bento", + "bidi", + "bulkinsert", + "cass", + "cheking", + "continous", + "conv", + "createindex", + "deeepsource", + "depentency", + "devcontiner", + "dotdc", + "errorgroup", + "exection", + "exhaustruct", + "exsiting", + "gache's", + "gorountine", + "hrichiksite", + "informations", + "iocopy", + "junsei", + "libquadmath", + "lincense", + "liusy", + "makr", + "malloc", + "minnum", + "multiplatforms", + "nvimlog", + "osdk", + "pacakge", + "pacicked", + "pbdocs", + "performace", + "priorityclasses", + "savedmodel", + "slowloris", + "sptag", + "stackdriver", + "tada", + "takuyaymd", + "tensorlfow", + "tset", + "unkeyed", + "unneccessary", + "valdcli", + "vcache", + "vqueue's", + "workdir", + "yamls", + "ykadowak", + "zchee" + ], + "CONTRIBUTING.md": ["Firstname", "Lastname", "implmentes", "newfeature"], + "Makefile": [ + "BLAS", + "CRORG", + "DBLA", + "DBUILD", + "DCMAKE", + "DFAISS", + "EXTLDFLAGS", + "GHCRORG", + "GOCACHE", + "GOPROXY", + "MAKELISTS", + "NPROCESSORS", + "NUMPANES", + "ONLN", + "Ofast", + "PBDOCS", + "PBGOS", + "PROTOBUF", + "PROTODIRS", + "PROTOS", + "RLENGTH", + "RSTART", + "STDDEV", + "armv", + "copress", + "crlfmt", + "dockerfiles", + "fmerge", + "fopenmp", + "funroll", + "gitfiles", + "gsub", + "laec", + "lgfortran", + "lhdf", + "libfaiss", + "llapack", + "lopenblas", + "lstdc", + "mtune", + "ncpu", + "nproc", + "pthread", + "relro", + "strictgoimports", + "toplevel" + ], + "Makefile.d/bench.mk": ["benchmem", "cpuprofile", "memprofile", "nvim"], + "Makefile.d/build.mk": ["EXTLDFLAGS", "linkmode", "popd", "pushd"], + "Makefile.d/dependencies.mk": [ + "GOCACHE", + "PROTOBUF", + "modcache", + "testcache" + ], + "Makefile.d/docker.mk": [ + "CRORG", + "GHCRORG", + "buildcache", + "mediatypes", + "npmjs" + ], + "Makefile.d/e2e.mk": ["ECRUD"], + "Makefile.d/functions.mk": [ + "APIV", + "PBGOS", + "buildid", + "extldflags", + "modcacherw", + "netgo", + "osusergo", + "trimpath" + ], + "Makefile.d/helm.mk": ["valdmirrortarget", "xzvf"], + "Makefile.d/k3d.mk": ["loadbalancer", "storageclass"], + "Makefile.d/k8s.mk": [ + "CRORG", + "cainjector", + "jaegertracing", + "operatorusing", + "promtail", + "serrver" + ], + "Makefile.d/kind.mk": ["conntrack", "netfilter"], + "Makefile.d/minikube.mk": ["hostpath", "storageclass"], + "Makefile.d/proto.mk": 
["PROTOS", "protobufs"], + "Makefile.d/test.mk": [ + "covermode", + "coverprofile", + "cweill", + "gotesttools", + "mfridman", + "notests", + "showteststatus" + ], + "Makefile.d/tools.mk": [ + "DBUILD", + "DCMAKE", + "DHDF", + "DZLIB", + "busa", + "crlfmt", + "fatih", + "gomodifytags", + "goplay", + "haya", + "honnef", + "josharian", + "libz", + "momotaro", + "mvdan", + "segmentio", + "staticcheck", + "strictgoimports", + "tlsv", + "xzvf" + ], + "README.md": [ + "Codacy", + "Funakoshi", + "Grimaud", + "Hiroto", + "Hrichik", + "Kadowaki", + "Kato", + "Katz", + "Kiichiro", + "Koichi", + "Kosuke", + "Mazumder", + "Morimoto", + "Okamura", + "Rintaro", + "Shiraishi", + "Siyuan", + "YUKAWA", + "Yusuke", + "aknishid", + "ando", + "datelier", + "junsei", + "kevindiu", + "liusy", + "lycorp", + "srcset", + "taisuou", + "takuyaymd", + "thedrow", + "zchee" + ], + "apis/docs/v1/docs.md": [ + "Bignum", + "Fixnum", + "STOCKOUT", + "hasn", + "sfixed", + "sint" + ], + "apis/grpc/v1/payload/payload.pb.go": ["wrapperspb"], + "apis/grpc/v1/payload/payload.pb.json.go": ["protojson"], + "apis/grpc/v1/payload/payload_vtproto.pb.go": [ + "Indexmapkey", + "Indexmapvalue", + "Lenmapkey", + "Lenmapvalue", + "mapkey", + "mapvalue", + "postmsg", + "protohelpers", + "vtmsg", + "vtpb", + "wiretype", + "wrapperspb" + ], + "apis/grpc/v1/rpc/errdetails/error_details.pb.go": [ + "STOCKOUT", + "durationpb" + ], + "apis/grpc/v1/rpc/errdetails/error_details.pb.json.go": ["protojson"], + "apis/grpc/v1/rpc/errdetails/error_details_vtproto.pb.go": [ + "Indexmapkey", + "Indexmapvalue", + "Lenmapkey", + "Lenmapvalue", + "durationpb", + "mapkey", + "mapvalue", + "protohelpers", + "wiretype" + ], + "apis/proto/v1/agent/core/agent.proto": ["createandsave"], + "apis/proto/v1/payload/payload.proto": ["objc"], + "apis/proto/v1/rpc/errdetails/error_details.proto": ["STOCKOUT", "objc"], + "apis/swagger/v1/agent/core/agent.swagger.json": ["createandsave"], + "assets/test/templates/common/fill.tmpl": ["uintptr"], + "assets/test/templates/common/function.tmpl": ["Subtests"], + "assets/test/templates/option/function.tmpl": ["Subtests"], + "buf.gen.yaml": ["mfridman", "neoeinstein", "openapiv", "pseudomuto"], + "charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml": ["vbjs"], + "charts/vald-benchmark-operator/crds/valdbenchmarkoperatorrelease.yaml": [ + "valdbenchmarkoperator", + "valdbenchmarkoperatorreleases", + "vbor", + "vbors" + ], + "charts/vald-benchmark-operator/crds/valdbenchmarkscenario.yaml": ["vbss"], + "charts/vald-benchmark-operator/templates/clusterrole.yaml": [ + "deletecollection" + ], + "charts/vald-helm-operator/README.md": ["readyz"], + "charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml": ["vhors"], + "charts/vald-helm-operator/templates/clusterrole.yaml": [ + "clusterrolebindings", + "clusterroles", + "customresourcedefinitions", + "horizontalpodautoscalers", + "networkpolicies", + "persistentvolumeclaims", + "poddisruptionbudgets", + "priorityclasses", + "serviceaccounts", + "statefulsets" + ], + "charts/vald-helm-operator/values.yaml": ["readyz"], + "charts/vald-readreplica/Chart.yaml": ["ykadowak"], + "charts/vald-readreplica/templates/deployment.yaml": [ + "valdchart", + "valdname" + ], + "charts/vald-readreplica/templates/hpa.yaml": ["valdchart", "valdname"], + "charts/vald-readreplica/templates/svc.yaml": ["valdchart", "valdname"], + "charts/vald/README.md": ["goroutines"], + "charts/vald/crds/valdmirrortarget.yaml": [ + "valdmirrortarget", + "valdmirrortargets", + "vmts" + ], + 
"charts/vald/templates/_helpers.tpl": ["envkey", "rsslimit", "vszlimit"], + "charts/vald/templates/gateway/mirror/clusterrole.yaml": [ + "valdmirrortargets" + ], + "charts/vald/templates/index/job/readreplica/rotate/clusterrole.yaml": [ + "persistentvolumeclaims" + ], + "charts/vald/templates/index/job/readreplica/rotate/configmap.yaml": [ + "envkey" + ], + "charts/vald/values.schema.json": ["goroutines"], + "charts/vald/values.yaml": ["goroutines"], + "cmd/tools/cli/benchmark/core/main.go": [ + "pfile", + "vmdata", + "vmexe", + "vmlib", + "vmlock", + "vmpin", + "vmpte", + "vmstack", + "vmswap" + ], + "dockers/binfmt/Dockerfile": ["tonistiigi"], + "dockers/ci/base/Dockerfile": ["graphviz"], + "dockers/ci/base/README.md": ["titile"], + "dockers/dev/Dockerfile": ["gomodifytags", "graphviz", "staticcheck"], + "docs/api/build_proto.md": ["chrono", "nanos", "protos", "rustc"], + "docs/contributing/coding-style.md": [ + "Roundtripper", + "Structs", + "crlfmt", + "httputil", + "ioutil", + "structs", + "subtests" + ], + "docs/overview/about-vald.md": ["rebalancing", "rerank"], + "docs/overview/component/agent.md": ["verctors"], + "docs/overview/component/discoverer.md": ["nodeby"], + "docs/performance/continuous-benchmark.md": ["vbor"], + "docs/performance/loadtest.md": ["GOMAXPROCS", "maxprocs", "streaminsert"], + "docs/tutorial/get-started-with-faiss-agent.md": ["cvspq", "jrnlw"], + "docs/tutorial/get-started.md": [ + "cvspq", + "getstarted", + "jrnlw", + "loadbalancer" + ], + "docs/tutorial/vald-agent-standalone-on-docker.md": [ + "GOMAXPROCS", + "maxprocs" + ], + "docs/tutorial/vald-multicluster-on-k8s.md": [ + "brbsp", + "dnxbb", + "ghlpx", + "gzcr", + "hbklj", + "kgrdf", + "multicluster", + "vjbqx", + "vlmpg", + "wtlcv", + "xmws" + ], + "docs/usecase/usage-example.md": ["vectorizing"], + "docs/user-guides/backup-configuration.md": ["ACCESSS"], + "docs/user-guides/capacity-planning.md": ["Burstable"], + "docs/user-guides/client-api-config.md": ["Milli", "achive", "rerank"], + "docs/user-guides/cluster-role-binding.md": [ + "clusterrolebinding", + "finalizers", + "retrive", + "valdmirrortargets" + ], + "docs/user-guides/deployment.md": ["finalizers", "valdhelmopratorreleases"], + "example/helm/values-standalone-agent-ngt.yaml": ["mnist's"], + "example/helm/values-with-pyroscope.yaml": ["serversscheme"], + "example/helm/values.yaml": ["Agnet", "mnist's", "serversscheme"], + "example/manifest/scylla/configmap.yaml": ["initdb"], + "example/manifest/scylla/job.yaml": ["cqlsh", "initdb"], + "go.mod": [ + "adal", + "afero", + "ajstarks", + "amqp", + "ansiterm", + "antihax", + "appengine", + "armon", + "astcopy", + "astequal", + "autorest", + "azcore", + "azidentity", + "benbjohnson", + "beorn", + "blackfriday", + "bmizerany", + "boombuler", + "buger", + "bytefmt", + "campoy", + "cenkalti", + "cespare", + "chunkreader", + "chzyer", + "cloudfoundry", + "cloudsql", + "cmdflag", + "colorstring", + "configsources", + "cpuguy", + "cpuid", + "creack", + "davecgh", + "dbus", + "dejavu", + "demangle", + "denisenkom", + "devigned", + "dgryski", + "difflib", + "diskv", + "dnaeon", + "easyjson", + "embedmd", + "emicklei", + "errcheck", + "evanphx", + "eventstream", + "fastuuid", + "felixge", + "fgprof", + "filippo", + "firestore", + "flowrate", + "fogleman", + "fortytw", + "fpdf", + "frankban", + "freetype", + "glfw", + "goautoneg", + "gobwas", + "godbus", + "godebug", + "godeltaprof", + "gofpdf", + "gofpdi", + "gofrs", + "gofuzz", + "gogrep", + "gojsonpointer", + "gojsonreference", + "gojsonschema", + 
"gomega", + "gomodules", + "gonic", + "gostub", + "gotool", + "gover", + "gregjones", + "groupcache", + "grpcreplay", + "hailocab", + "hanwen", + "honnef", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "httpsnoop", + "iancoleman", + "ianlancetaylor", + "imdario", + "imds", + "inconshreveable", + "isatty", + "jackc", + "jessevdk", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "kisielk", + "kyaml", + "kylelemons", + "leaktest", + "leodido", + "liggitt", + "logex", + "logfmt", + "logr", + "lucasb", + "mailru", + "mattn", + "matttproud", + "mergo", + "mitchellh", + "modocache", + "monochromegane", + "montanaflynn", + "mountinfo", + "mssqldb", + "munnerz", + "nhooyr", + "niemeyer", + "nxadm", + "objx", + "ocsql", + "onsi", + "otelhttp", + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pmezard", + "prashantv", + "progressbar", + "quicktest", + "ratelimit", + "replayers", + "rogpeppe", + "russross", + "ruudk", + "sbinet", + "schollz", + "secretmanager", + "sergi", + "shlex", + "shopspring", + "shurcoo", + "sirupsen", + "spdystream", + "sqlexp", + "sqlmock", + "sqlx", + "starlark", + "stdinfo", + "stdr", + "stix", + "stoewer", + "strcase", + "strparse", + "tabwriter", + "toolsmith", + "treeprint", + "typeparams", + "udpa", + "ugorji", + "vfsgen", + "xeipuuv", + "xlab", + "xxhash", + "zapr" + ], + "hack/benchmark/assets/x1b/loader.go": ["fname"], + "hack/benchmark/assets/x1b/loader_test.go": ["fname"], + "hack/benchmark/internal/db/nosql/cassandra/cassandra_test.go": ["metas"], + "hack/benchmark/internal/db/nosql/cassandra/testdata.json": [ + "Adipisicing", + "Aliqua", + "Aliquip", + "Amet", + "Aute", + "Cillum", + "Commodo", + "Consequat", + "Cupidatat", + "Deserunt", + "Dolore", + "Duis", + "Eiusmod", + "Elit", + "Enim", + "Excepteur", + "Fugiat", + "Incididunt", + "Irure", + "Labore", + "Laboris", + "Laborum", + "Mollit", + "Nostrud", + "Nulla", + "Occaecat", + "Officia", + "Pariatur", + "Proident", + "Quis", + "Reprehenderit", + "Sint", + "Sunt", + "Tempor", + "Ullamco", + "Velit", + "Veniam", + "Voluptate", + "adipisicing", + "aliqua", + "aliquip", + "amet", + "aute", + "cillum", + "commodo", + "consequat", + "cupidatat", + "deserunt", + "dolore", + "duis", + "eiusmod", + "elit", + "enim", + "excepteur", + "fugiat", + "incididunt", + "irure", + "labore", + "laboris", + "laborum", + "mollit", + "nostrud", + "nulla", + "occaecat", + "officia", + "pariatur", + "proident", + "quis", + "reprehenderit", + "sint", + "sunt", + "tempor", + "ullamco", + "velit", + "veniam", + "voluptate" + ], + "hack/benchmark/internal/starter/agent/core/ngt/option.go": [ + "dtype", + "otype" + ], + "hack/benchmark/src/singleflight/singleflight_bench_test.go": [ + "durs", + "resultsmap", + "singlefligh", + "stdsingleflight" + ], + "hack/docker/gen/main.go": [ + "Inernal", + "TARGETARCH", + "TARGETOS", + "WORKDIR", + "Workdir", + "epkg", + "gomodifytags", + "graphviz", + "tmpl", + "tonistiigi" + ], + "hack/go.mod.default": [ + "adal", + "afero", + "ajstarks", + "amqp", + "antihax", + "appengine", + "armon", + "astcopy", + "astequal", + "autorest", + "azcore", + "azidentity", + "benbjohnson", + "beorn", + "blackfriday", + "bmizerany", + "boombuler", + "buger", + "bytefmt", + "cenkalti", + "cespare", + "chunkreader", + "chzyer", + "cloudfoundry", + "cloudsql", + "cmdflag", + "colorstring", + 
"configsources", + "cpuguy", + "cpuid", + "creack", + "davecgh", + "dbus", + "dejavu", + "demangle", + "denisenkom", + "devigned", + "dgryski", + "difflib", + "diskv", + "dnaeon", + "easyjson", + "emicklei", + "errcheck", + "evanphx", + "eventstream", + "fastuuid", + "firestore", + "fogleman", + "fortytw", + "fpdf", + "frankban", + "freetype", + "glfw", + "goautoneg", + "gobwas", + "godbus", + "godebug", + "godeltaprof", + "gofpdf", + "gofpdi", + "gofrs", + "gofuzz", + "gogrep", + "gojsonpointer", + "gojsonreference", + "gojsonschema", + "gomega", + "gomodules", + "gonic", + "gostub", + "gotool", + "gover", + "gregjones", + "groupcache", + "grpcreplay", + "hailocab", + "hanwen", + "honnef", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "iancoleman", + "ianlancetaylor", + "imds", + "inconshreveable", + "isatty", + "jackc", + "jessevdk", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "kisielk", + "kylelemons", + "leaktest", + "leodido", + "liggitt", + "logex", + "logfmt", + "logr", + "lucasb", + "mailru", + "mattn", + "matttproud", + "mitchellh", + "modocache", + "monochromegane", + "montanaflynn", + "mountinfo", + "mssqldb", + "munnerz", + "nhooyr", + "niemeyer", + "nxadm", + "objx", + "ocsql", + "onsi", + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pmezard", + "prashantv", + "progressbar", + "quicktest", + "replayers", + "rogpeppe", + "russross", + "ruudk", + "sbinet", + "schollz", + "secretmanager", + "sergi", + "shlex", + "shopspring", + "shurcoo", + "sirupsen", + "spdystream", + "sqlexp", + "sqlmock", + "sqlx", + "starlark", + "stdinfo", + "stdr", + "stix", + "stoewer", + "strcase", + "strparse", + "tabwriter", + "toolsmith", + "treeprint", + "typeparams", + "udpa", + "ugorji", + "vfsgen", + "xeipuuv", + "xlab", + "xxhash", + "zapr" + ], + "hack/gorules/rules_test.go": ["analysistest"], + "hack/gorules/testdata/tests.go": ["Fmts", "newname"], + "hack/license/gen/main.go": [ + "Pipefile", + "contributorsrc", + "dirwalk", + "gitmodules", + "helmignore", + "tmpl", + "webp", + "whitesource" + ], + "hack/tools/metrics/main.go": ["lucasb", "vgsvg"], + "internal/backoff/backoff_test.go": ["timelimit"], + "internal/circuitbreaker/breaker_test.go": ["resetted"], + "internal/client/v1/client/client.go": ["Upsertor"], + "internal/client/v1/client/discoverer/discover_test.go": [ + "copylocks", + "govet" + ], + "internal/compress/gzip_option_test.go": ["zdtd"], + "internal/compress/lz4/lz4.go": ["pierrec"], + "internal/compress/zstd_option_test.go": ["zdtd"], + "internal/compress/zstd_test.go": ["decom", "vecotr"], + "internal/config/backup_test.go": ["healthcheck"], + "internal/config/blob.go": ["storaget"], + "internal/config/blob_test.go": ["CLOUDSTORAGECONFIG"], + "internal/config/cassandra.go": ["TLSCA"], + "internal/config/cassandra_test.go": ["localserial"], + "internal/config/compress_test.go": [ + "COMPRESSCORE", + "COMPRESSORREGISTERER" + ], + "internal/config/config.go": ["dnum", "rdst", "snum", "vdst"], + "internal/config/config_test.go": [ + "GETACTUALVALUE", + "GETACTUALVALUES", + "GLOBALCONFIG", + "fname" + ], + "internal/config/faiss.go": ["Voronoi", "subquantizers", "subvector"], + "internal/config/filter_test.go": ["sufix"], + "internal/config/gateway_test.go": ["bmanager", "efilter", "ireplica"], + "internal/config/grpc.go": ["Dail"], + 
"internal/config/grpc_test.go": [ + "DIALOPTION", + "GRPCCLIENT", + "GRPCCLIENTKEEPALIVE", + "healthcheck" + ], + "internal/config/observability_test.go": ["servicename"], + "internal/config/redis_test.go": ["Timelimit"], + "internal/config/server_test.go": ["GPRC", "GRPCKEEPALIVE"], + "internal/config/sidecar_test.go": ["AGENTSIDECAR"], + "internal/conv/conv.go": ["Atobs"], + "internal/core/algorithm/faiss/Capi.cpp": ["IVFPQ", "xids"], + "internal/core/algorithm/faiss/Capi.h": ["xids"], + "internal/core/algorithm/faiss/faiss.go": [ + "lfaiss", + "ntotal", + "strage", + "xids" + ], + "internal/core/algorithm/faiss/option.go": ["lfaiss"], + "internal/core/algorithm/ngt/Makefile": ["benchmem"], + "internal/core/algorithm/ngt/ngt.go": [ + "bulkinsert", + "bulkremove", + "cstats", + "lngt", + "ospace", + "stdlib" + ], + "internal/core/algorithm/ngt/ngt_test.go": ["bulkinsert", "ospace"], + "internal/core/algorithm/ngt/option.go": [ + "dotp", + "dproduct", + "halffloat", + "innerp", + "iproduct", + "lngt", + "nang", + "nangle", + "ncos", + "ncosine", + "normalizedang", + "normalizedcos", + "sparsejac" + ], + "internal/core/algorithm/ngt/option_test.go": ["nang", "ncos"], + "internal/db/kvs/bbolt/bbolt_test.go": ["testfunc"], + "internal/db/kvs/bbolt/option.go": ["Freelist"], + "internal/db/kvs/bbolt/option_test.go": ["Freelist"], + "internal/db/kvs/pogreb/options.go": ["deafult"], + "internal/db/kvs/pogreb/pogreb.go": ["deafult"], + "internal/db/kvs/redis/delete.go": ["Deleter"], + "internal/db/kvs/redis/hook.go": ["Cmder"], + "internal/db/kvs/redis/option_test.go": ["defult"], + "internal/db/kvs/redis/redis.go": ["Deleter", "Pipeliner"], + "internal/db/kvs/redis/redis_mock.go": ["Cmder", "Pipeliner"], + "internal/db/kvs/redis/redis_test.go": ["cslots", "gotc"], + "internal/db/nosql/cassandra/cassandra.go": [ + "Queryx", + "cmps", + "configuation", + "wlhf" + ], + "internal/db/nosql/cassandra/cassandra_test.go": [ + "Debouncer", + "Queryx", + "cmps", + "dchf", + "selete", + "unavilable", + "wlhf" + ], + "internal/db/nosql/cassandra/delete.go": ["Deleter"], + "internal/db/nosql/cassandra/option.go": [ + "TLSCA", + "eachquorum", + "localone", + "localquorum", + "localserial" + ], + "internal/db/nosql/cassandra/option_test.go": ["TLSCA"], + "internal/db/rdb/mysql/mysql_test.go": ["insertbysql", "loadcontext"], + "internal/db/rdb/mysql/option_test.go": ["valddb", "valdmysql"], + "internal/db/storage/blob/cloudstorage/cloudstorage.go": ["iblob"], + "internal/db/storage/blob/cloudstorage/cloudstorage_test.go": ["iblob"], + "internal/db/storage/blob/cloudstorage/option.go": ["urlstr"], + "internal/db/storage/blob/s3/reader/reader_test.go": ["roop"], + "internal/db/storage/blob/s3/sdk/s3/s3manager/s3manager.go": ["mngr"], + "internal/db/storage/blob/s3/session/session_test.go": [ + "btop", + "forcepathstyle", + "httpclient", + "itop", + "maxretries" + ], + "internal/errors/benchmark.go": [ + "benchjob", + "benchscenario", + "tbenchjob", + "tbenchscenario" + ], + "internal/errors/cassandra.go": ["consistetncy", "tcql"], + "internal/errors/cassandra_test.go": ["consistetncy", "tcql"], + "internal/errors/circuitbreaker.go": ["errstr"], + "internal/errors/compressor.go": ["registerers"], + "internal/errors/compressor_test.go": ["leve", "registerers"], + "internal/errors/errors_test.go": ["Unwarp", "uncomparable", "unwrapd"], + "internal/errors/file.go": ["fitos"], + "internal/errors/file_test.go": ["fitos"], + "internal/errors/lb.go": ["Insuffcient"], + "internal/errors/mysql_test.go": ["vaef"], + 
"internal/errors/redis.go": ["KVVK"], + "internal/errors/redis_test.go": ["KVVK"], + "internal/errors/vald_test.go": ["tvald"], + "internal/file/file_test.go": ["utiltest"], + "internal/info/info.go": ["procs", "strs"], + "internal/k8s/client/client.go": [ + "applyconfigurations", + "applycorev", + "clientgoscheme", + "snapshotv", + "volumesnapshot" + ], + "internal/k8s/job/job.go": ["batchv"], + "internal/k8s/option.go": ["mertics"], + "internal/k8s/reconciler.go": ["mertics", "mserver"], + "internal/k8s/reconciler_test.go": ["mertics"], + "internal/k8s/types.go": ["appsv", "batchv", "snapshotv", "volumesnapshot"], + "internal/k8s/vald/benchmark/api/v1/job_types.go": ["deepcopy"], + "internal/k8s/vald/benchmark/api/v1/scenario_types.go": ["deepcopy"], + "internal/k8s/vald/mirror/api/v1/target_types.go": ["deepcopy"], + "internal/log/glg/glg.go": ["DEBG", "dstr"], + "internal/log/glg/glg_test.go": ["DEBG"], + "internal/log/level/level.go": ["DEBG", "ERRO", "FATA"], + "internal/log/logger/iface.go": ["finalizer"], + "internal/log/logger/type.go": ["Atot"], + "internal/log/logger/type_test.go": ["Atot", "ZEROL"], + "internal/log/nop/nop.go": ["finalizer"], + "internal/log/option.go": ["Atot"], + "internal/log/retry/retry_test.go": ["foramt", "gotr", "wantr"], + "internal/net/control/control.go": ["boolint"], + "internal/net/control/control_test.go": ["boolint"], + "internal/net/control/control_unix.go": ["uapi"], + "internal/net/dialer.go": ["nport", "tconnectionstate", "tder"], + "internal/net/dialer_test.go": ["Nums", "copylocks", "govet"], + "internal/net/grpc/client.go": ["gbackoff", "parseable", "rebalancing"], + "internal/net/grpc/client_test.go": ["gbackoff"], + "internal/net/grpc/errdetails/errdetails.go": ["iobjs"], + "internal/net/grpc/logger/logger.go": [ + "Errorln", + "Infoln", + "Warningf", + "Warningln", + "grpclog" + ], + "internal/net/grpc/logger/logger_test.go": [ + "Errorln", + "Infoln", + "Warningf", + "Warningln", + "grpclog" + ], + "internal/net/grpc/option.go": [ + "gbackoff", + "metricinterceptor", + "traceinterceptor" + ], + "internal/net/grpc/pool/pool.go": ["tdelay"], + "internal/net/grpc/proto/proto.go": ["protoiface"], + "internal/net/grpc/server_test.go": ["channelz"], + "internal/net/http/client/option.go": ["Alives", "Keepalives"], + "internal/net/http/client/option_test.go": ["Alives", "Keepalives"], + "internal/net/http/dump/dump_test.go": ["hoge"], + "internal/net/http/metrics/pprof.go": [ + "felixge", + "fgprof", + "godeltaprof", + "pyprof", + "threadcreate" + ], + "internal/net/http/middleware/timeout_test.go": ["extermemly"], + "internal/net/http/transport/roundtrip.go": ["roundtripper"], + "internal/net/net.go": ["hostport"], + "internal/net/net_test.go": ["hostport"], + "internal/observability/exporter/otlp/otlp.go": [ + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "semconv" + ], + "internal/observability/metrics/grpc/grpc.go": ["Desctiption"], + "internal/observability/metrics/mem/index/index.go": ["mstats"], + "internal/observability/metrics/mem/mem.go": [ + "Memstats", + "Shmem", + "buckhash", + "mcache", + "mspan", + "oinsts", + "shmem", + "toal", + "vmdata", + "vmexe", + "vmlck", + "vmlib", + "vmpin", + "vmpte", + "vmstk", + "vmswap" + ], + "internal/observability/metrics/mem/mem_test.go": ["Memstats"], + "internal/observability/trace/status.go": ["RPCGRPC", "ocodes", "semconv"], + "internal/params/params.go": ["commnad"], + "internal/runner/runner.go": ["maxprocs", "mfunc", "timelocation"], + "internal/safety/safety.go": ["revcover", 
"runtimer"], + "internal/servers/option.go": ["strg"], + "internal/servers/option_test.go": ["gsrv", "strg"], + "internal/servers/server/option.go": [ + "accesslog", + "accessloginterceptor", + "metricinterceptor", + "recoverinterceptor", + "traceinterceptor" + ], + "internal/servers/server/server_test.go": ["prestart"], + "internal/servers/servers_test.go": ["strg"], + "internal/strings/strings_benchmark_test.go": ["tstr"], + "internal/sync/errgroup/group_test.go": ["acquireings", "goroutne"], + "internal/sync/semaphore/semaphore.go": ["cancelation"], + "internal/sync/semaphore/semaphore_example_test.go": [ + "Collatz", + "collatz", + "nonpositive" + ], + "internal/sync/semaphore/semaphore_test.go": ["Doesnt", "unacquired"], + "internal/sync/singleflight/singleflight.go": ["chans", "dups"], + "internal/sync/singleflight/singleflight_test.go": ["DOCHAN", "unparam"], + "internal/test/data/hdf5/hdf5.go": ["Keepalives", "Neighors"], + "internal/test/data/hdf5/option.go": ["dataname"], + "internal/test/data/vector/gen.go": ["irand"], + "internal/test/mock/grpc_testify_mock.go": ["losm", "usecases"], + "internal/test/mock/k8s/client.go": ["crclient"], + "internal/timeutil/rate/rate.go": ["ratelimit"], + "internal/timeutil/rate/rate_test.go": ["ratelimit"], + "internal/timeutil/time_test.go": ["dummystring", "hoge"], + "internal/unit/unit.go": ["bytefmt", "cloudfoundry"], + "internal/version/version.go": ["curv"], + "internal/worker/worker_test.go": ["testname"], + "k8s/external/minio/deployment.yaml": ["ACCESSKEY", "SECRETKEY"], + "k8s/external/minio/mb-job.yaml": ["ACCESSKEY", "SECRETKEY"], + "k8s/metrics/grafana/dashboards/00-vald-cluster-overview.yaml": [ + "Misscheduled", + "Qxya", + "misscheduled" + ], + "k8s/metrics/grafana/dashboards/02-vald-discoverer.yaml": [ + "Jkemc", + "Versin" + ], + "k8s/metrics/grafana/dashboards/05-vald-index-manager.yaml": ["jowe"], + "k8s/metrics/grafana/dashboards/10-vald-benchmark-operator.yaml": [ + "Versin", + "fdewjfx", + "jkxz" + ], + "k8s/metrics/grafana/dashboards/99-vald-agent-memory.yaml": [ + "Memstats", + "buckhash", + "mcache", + "mspan", + "vmdata", + "vmexe", + "vmlck", + "vmlib", + "vmpin", + "vmpte", + "vmstk", + "vmswap" + ], + "k8s/metrics/jaeger/jaeger.yaml": ["jaegertracing"], + "k8s/metrics/loki/loki.yaml": [ + "boltdb", + "ingester", + "inmemory", + "kvstore", + "lifecycler" + ], + "k8s/metrics/loki/promtail.yaml": [ + "labelmap", + "promtail", + "varlibdockercontainers", + "varlog" + ], + "k8s/metrics/prometheus/configmap.yaml": ["cadvisor", "labelmap"], + "k8s/metrics/pyroscope/README.md": ["mafests"], + "k8s/metrics/pyroscope/base/configmap.yaml": ["labelmap"], + "k8s/metrics/pyroscope/base/daemonset.yaml": ["ebpfspy"], + "k8s/metrics/pyroscope/base/kustomization.yaml": ["clusterrolebinding"], + "k8s/metrics/tempo/jaeger-agent.yaml": ["jaegertracing"], + "k8s/metrics/tempo/tempo.yaml": [ + "blocklist", + "ingester", + "inmemory", + "kvstore", + "lifecycler" + ], + "k8s/operator/helm/clusterrole.yaml": [ + "clusterrolebindings", + "clusterroles", + "customresourcedefinitions", + "horizontalpodautoscalers", + "networkpolicies", + "persistentvolumeclaims", + "poddisruptionbudgets", + "priorityclasses", + "serviceaccounts", + "statefulsets" + ], + "k8s/operator/helm/crds/valdhelmoperatorrelease.yaml": ["vhors"], + "k8s/operator/helm/operator.yaml": ["readyz"], + "k8s/tools/benchmark/job/clusterrolebinding.yaml": ["rolebinding"], + "k8s/tools/benchmark/job/serviceaccount.yaml": ["Versoin"], + 
"k8s/tools/benchmark/operator/clusterrole.yaml": ["deletecollection"], + "k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml": ["vbjs"], + "k8s/tools/benchmark/operator/crds/valdbenchmarkoperatorrelease.yaml": [ + "valdbenchmarkoperator", + "valdbenchmarkoperatorreleases", + "vbor", + "vbors" + ], + "k8s/tools/benchmark/operator/crds/valdbenchmarkscenario.yaml": ["vbss"], + "pkg/agent/core/faiss/handler/grpc/search.go": ["createing"], + "pkg/agent/core/faiss/service/faiss.go": [ + "Voronoi", + "ntotal", + "saveindex", + "subquantizers", + "tpath", + "tvald" + ], + "pkg/agent/core/faiss/service/option.go": ["bdbs", "brnd"], + "pkg/agent/core/faiss/usecase/agentd.go": ["faissmetrics"], + "pkg/agent/core/ngt/handler/grpc/flush.go": ["cnts"], + "pkg/agent/core/ngt/handler/grpc/index_test.go": ["exteneral"], + "pkg/agent/core/ngt/handler/grpc/insert.go": ["vmap"], + "pkg/agent/core/ngt/handler/grpc/insert_test.go": [ + "Testingcase", + "joind", + "nonexistid", + "pushinsert" + ], + "pkg/agent/core/ngt/handler/grpc/object_test.go": ["testfunc", "tmock"], + "pkg/agent/core/ngt/handler/grpc/update.go": ["idis", "vmap"], + "pkg/agent/core/ngt/handler/grpc/update_test.go": ["Testint"], + "pkg/agent/core/ngt/service/ngt.go": [ + "Nopvq", + "nkvs", + "nobic", + "nopvq", + "saveindex", + "toid", + "tvald" + ], + "pkg/agent/core/ngt/service/ngt_test.go": [ + "additionaldigits", + "kvald", + "metafile", + "nobic", + "nopvq", + "testfunc" + ], + "pkg/agent/core/ngt/service/option.go": ["bdbs", "brnd"], + "pkg/agent/core/ngt/usecase/agentd.go": ["memmetrics", "ngtmetrics"], + "pkg/agent/internal/vqueue/queue.go": ["uninserted"], + "pkg/agent/internal/vqueue/stateful_test.go": ["getvector"], + "pkg/agent/sidecar/service/restorer/restorer.go": ["Typeflag"], + "pkg/discoverer/k8s/handler/grpc/handler.go": [ + "ngroup", + "pgroup", + "sgroup" + ], + "pkg/discoverer/k8s/handler/grpc/handler_test.go": [ + "ngroup", + "pgroup", + "sgroup" + ], + "pkg/discoverer/k8s/service/discover.go": [ + "mnode", + "mpod", + "reconciation", + "svcsmap" + ], + "pkg/discoverer/k8s/service/discover_test.go": ["mnode", "mpod"], + "pkg/discoverer/k8s/usecase/discovered.go": ["unbackupped"], + "pkg/gateway/lb/handler/grpc/aggregation.go": [ + "Insuffcient", + "fdist", + "fmax", + "timeoutage" + ], + "pkg/gateway/lb/handler/grpc/handler.go": [ + "cnts", + "indegrees", + "outdegrees" + ], + "pkg/gateway/lb/handler/grpc/handler_test.go": ["Cnts"], + "pkg/gateway/lb/handler/grpc/pairing_heap_test.go": ["gids"], + "pkg/gateway/lb/handler/grpc/search_benchmark_test.go": ["datas"], + "pkg/gateway/mirror/handler/grpc/handler_test.go": ["clientmock", "cmap"], + "pkg/gateway/mirror/service/discovery.go": ["ctgt", "ptgt"], + "pkg/gateway/mirror/service/discovery_option.go": ["datacenter"], + "pkg/gateway/mirror/service/mirror_test.go": ["grpcmock"], + "pkg/gateway/mirror/usecase/vald.go": ["mirrormetrics"], + "pkg/index/job/creation/service/indexer_test.go": [ + "clientmock", + "grpcmock" + ], + "pkg/index/job/save/service/indexer_test.go": ["clientmock", "grpcmock"], + "pkg/manager/index/usecase/indexer.go": ["indexmetrics"], + "pkg/tools/benchmark/job/config/config.go": ["JOBNAME", "JOBNAMESPACE"], + "pkg/tools/benchmark/job/service/insert.go": ["Prometeus"], + "pkg/tools/benchmark/job/service/job.go": ["Neighors", "benchjob"], + "pkg/tools/benchmark/job/service/option.go": ["Concurency", "bjns"], + "pkg/tools/benchmark/job/service/option_test.go": ["Concurency", "bjns"], + "pkg/tools/benchmark/job/usecase/benchmarkd.go": [ + 
"Concurency", + "gcli", + "unbackupped", + "usecases", + "vcli" + ], + "pkg/tools/benchmark/operator/service/operator.go": [ + "Progation", + "benchjob", + "benchjobs", + "benchmarkjob", + "benchscenario", + "bjob", + "cbjl", + "cbsl", + "cjobs", + "rcticker", + "wating" + ], + "pkg/tools/benchmark/operator/service/operator_test.go": [ + "benchjobs", + "minsit", + "scneario" + ], + "pkg/tools/benchmark/operator/usecase/benchmarkd.go": [ + "benchmarkmetrics", + "unbackupped", + "usecases" + ], + "pkg/tools/cli/loadtest/assets/dataset.go": ["kosarak", "nytimes"], + "pkg/tools/cli/loadtest/assets/hdf5_loader.go": ["dset", "npoints"], + "pkg/tools/cli/loadtest/assets/hdf5_loader_test.go": ["dset", "npoints"], + "pkg/tools/cli/loadtest/config/config.go": ["streaminsert"], + "rust/libs/ngt-rs/Cargo.toml": ["miette"], + "rust/libs/ngt-rs/build.rs": [ + "BFLOAT", + "DNGT", + "dylib", + "fopenmp", + "gomp", + "miette", + "rustc" + ], + "rust/libs/ngt-rs/src/input.cpp": ["cpath", "ngtresults", "vquery"], + "rust/libs/ngt-rs/src/lib.rs": ["repr"], + "rust/libs/observability/Cargo.toml": ["reqwest", "scopeguard", "serde"], + "rust/libs/observability/src/macros.rs": ["Updown"], + "rust/libs/proto/src/payload.v1.rs": ["repr"], + "tests/chaos/chart/README.md": ["kbps", "minburst", "peakrate"], + "tests/chaos/chart/templates/network/bandwidth.yaml": [ + "minburst", + "peakrate" + ], + "tests/chaos/chart/values.yaml": ["kbps", "minburst", "peakrate"], + "tests/e2e/crud/crud_test.go": ["ECRUD"], + "tests/e2e/kubernetes/client/client.go": [ + "Clientset", + "clientcmd", + "clientset" + ], + "tests/e2e/kubernetes/kubectl/kubectl.go": ["rollouts", "subcmds"], + "tests/e2e/kubernetes/portforward/portforward.go": [ + "genericclioptions", + "portforwarder", + "spdy", + "upgrader" + ], + "tests/e2e/operation/stream.go": ["evalidator", "svalidator"] + } } diff --git a/.gitfiles b/.gitfiles index 64f792a39c..c44a5deea3 100644 --- a/.gitfiles +++ b/.gitfiles @@ -46,7 +46,7 @@ .github/chatops_permissions.yaml .github/codeql/codeql-config.yaml .github/conflint.yaml -.github/dependabot.yml +.github/dependabot.yaml .github/helm/values/vald-mirror-target.yaml .github/helm/values/values-agent-sidecar.yaml .github/helm/values/values-chaos.yaml @@ -60,72 +60,72 @@ .github/helm/values/values-readreplica.yaml .github/issue_label_bot.yaml .github/kubelinter.yaml -.github/labeler.yml +.github/labeler.yaml .github/valdrelease/valdrelease.yaml -.github/workflows/_detect-ci-container.yml +.github/workflows/_detect-ci-container.yaml .github/workflows/_docker-image-scan.yaml .github/workflows/_docker-image.yaml -.github/workflows/_release-pr.yml +.github/workflows/_release-pr.yaml .github/workflows/_update-protobuf.yaml -.github/workflows/backport.yml -.github/workflows/build-binaries.yml -.github/workflows/build-protobuf.yml -.github/workflows/chatops-help.yml -.github/workflows/chatops.yml -.github/workflows/check-conflict.yml -.github/workflows/codeql-analysis.yml -.github/workflows/coverage.yml -.github/workflows/detect-internal-config-changes.yml -.github/workflows/dockers-agent-faiss-image.yml -.github/workflows/dockers-agent-image.yml -.github/workflows/dockers-agent-ngt-image.yml -.github/workflows/dockers-agent-sidecar-image.yml -.github/workflows/dockers-benchmark-job-image.yml +.github/workflows/backport.yaml +.github/workflows/build-binaries.yaml +.github/workflows/build-protobuf.yaml +.github/workflows/chatops-help.yaml +.github/workflows/chatops.yaml +.github/workflows/check-conflict.yaml 
+.github/workflows/codeql-analysis.yaml
+.github/workflows/coverage.yaml
+.github/workflows/detect-internal-config-changes.yaml
+.github/workflows/dockers-agent-faiss-image.yaml
+.github/workflows/dockers-agent-image.yaml
+.github/workflows/dockers-agent-ngt-image.yaml
+.github/workflows/dockers-agent-sidecar-image.yaml
+.github/workflows/dockers-benchmark-job-image.yaml
 .github/workflows/dockers-benchmark-operator-image.yaml
 .github/workflows/dockers-binfmt-image.yaml
-.github/workflows/dockers-buildbase-image.yml
+.github/workflows/dockers-buildbase-image.yaml
 .github/workflows/dockers-buildkit-image.yaml
 .github/workflows/dockers-buildkit-syft-scanner-image.yaml
-.github/workflows/dockers-ci-container-image.yml
-.github/workflows/dockers-dev-container-image.yml
-.github/workflows/dockers-discoverer-k8s-image.yml
-.github/workflows/dockers-gateway-filter-image.yml
-.github/workflows/dockers-gateway-lb-image.yml
+.github/workflows/dockers-ci-container-image.yaml
+.github/workflows/dockers-dev-container-image.yaml
+.github/workflows/dockers-discoverer-k8s-image.yaml
+.github/workflows/dockers-gateway-filter-image.yaml
+.github/workflows/dockers-gateway-lb-image.yaml
 .github/workflows/dockers-gateway-mirror-image.yaml
-.github/workflows/dockers-helm-operator-image.yml
-.github/workflows/dockers-image-scan.yml
-.github/workflows/dockers-index-correction.yml
-.github/workflows/dockers-index-creation.yml
-.github/workflows/dockers-index-operator.yml
-.github/workflows/dockers-index-save.yml
-.github/workflows/dockers-loadtest-image.yml
-.github/workflows/dockers-manager-index-image.yml
-.github/workflows/dockers-readreplica-rotate.yml
+.github/workflows/dockers-helm-operator-image.yaml
+.github/workflows/dockers-image-scan.yaml
+.github/workflows/dockers-index-correction-image.yaml
+.github/workflows/dockers-index-creation-image.yaml
+.github/workflows/dockers-index-operator-image.yaml
+.github/workflows/dockers-index-save-image.yaml
+.github/workflows/dockers-loadtest-image.yaml
+.github/workflows/dockers-manager-index-image.yaml
+.github/workflows/dockers-readreplica-rotate-image.yaml
 .github/workflows/dockers-release-branch-image.yaml
 .github/workflows/e2e-chaos.yaml
 .github/workflows/e2e-code-bench-agent.yaml
-.github/workflows/e2e-max-dim.yml
-.github/workflows/e2e-profiling.yml
-.github/workflows/e2e.yml
-.github/workflows/format.yml
-.github/workflows/fossa.yml
-.github/workflows/helm-lint.yml
-.github/workflows/helm.yml
+.github/workflows/e2e-max-dim.yaml
+.github/workflows/e2e-profiling.yaml
+.github/workflows/e2e.yaml
+.github/workflows/format.yaml
+.github/workflows/fossa.yaml
+.github/workflows/helm-lint.yaml
+.github/workflows/helm.yaml
 .github/workflows/issue-metrics.yaml
-.github/workflows/labeler.yml
-.github/workflows/release.yml
-.github/workflows/reviewdog-hadolint.yml
-.github/workflows/reviewdog-k8s.yml
-.github/workflows/reviewdog-markdown.yml
-.github/workflows/reviewdog.yml
+.github/workflows/labeler.yaml
+.github/workflows/release.yaml
+.github/workflows/reviewdog-hadolint.yaml
+.github/workflows/reviewdog-k8s.yaml
+.github/workflows/reviewdog-markdown.yaml
+.github/workflows/reviewdog.yaml
 .github/workflows/semver-major-minor.yaml
 .github/workflows/semver-patch.yaml
-.github/workflows/test-hack.yml
+.github/workflows/test-hack.yaml
 .github/workflows/unit-test.yaml
 .github/workflows/update-actions.yaml
 .github/workflows/update-protobuf.yaml
-.github/workflows/update-pull-request-and-issue-template.yml
-.github/workflows/update-web-docs.yml
+.github/workflows/update-pull-request-and-issue-template.yaml
+.github/workflows/update-web-docs.yaml
 .gitignore
 .golangci.yml
 .prh.yaml
@@ -140,7 +140,6 @@ Makefile
 Makefile.d/actions.mk
 Makefile.d/bench.mk
 Makefile.d/build.mk
-Makefile.d/client.mk
 Makefile.d/dependencies.mk
 Makefile.d/docker.mk
 Makefile.d/e2e.mk
@@ -511,6 +510,7 @@ cmd/tools/cli/loadtest/main_test.go
 cmd/tools/cli/loadtest/sample.yaml
 cmd/tools/cli/vdctl/main.go
 cmd/tools/cli/vdctl/main_test.go
+codecov.yaml
 design/Vald Architecture Assets.drawio
 design/Vald Architecture Dataflow.drawio
 design/Vald Architecture Overview.drawio
@@ -625,6 +625,7 @@ example/manifest/scylla/job.yaml
 go.mod
 go.sum
 hack/CHANGELOG.template.md
+hack/actions/gen/main.go
 hack/benchmark/assets/checksum/fashion-mnist-784-euclidean.md5
 hack/benchmark/assets/checksum/gist-960-euclidean.md5
 hack/benchmark/assets/checksum/glove-100-angular.md5
@@ -698,6 +699,8 @@ hack/benchmark/internal/starter/gateway/vald/vald_test.go
 hack/benchmark/internal/starter/starter.go
 hack/benchmark/metrics/metrics.go
 hack/benchmark/src/singleflight/singleflight_bench_test.go
+hack/cspell/main.go
+hack/cspell/main_test.go
 hack/docker/gen/main.go
 hack/git/hooks/pre-commit
 hack/go.mod.default
@@ -1594,6 +1597,8 @@ pkg/agent/internal/kvs/kvs.go
 pkg/agent/internal/kvs/kvs_test.go
 pkg/agent/internal/kvs/option.go
 pkg/agent/internal/kvs/option_test.go
+pkg/agent/internal/memstore/data_manager.go
+pkg/agent/internal/memstore/data_manager_test.go
 pkg/agent/internal/metadata/metadata.go
 pkg/agent/internal/metadata/metadata_test.go
 pkg/agent/internal/vqueue/option.go
@@ -1897,13 +1902,20 @@ rust/bin/agent/src/handler/search.rs
 rust/bin/agent/src/handler/update.rs
 rust/bin/agent/src/handler/upsert.rs
 rust/bin/agent/src/main.rs
-rust/libs/ngt-rs/Cargo.toml
-rust/libs/ngt-rs/build.rs
-rust/libs/ngt-rs/src/input.cpp
-rust/libs/ngt-rs/src/input.h
-rust/libs/ngt-rs/src/lib.rs
-rust/libs/ngt/Cargo.toml
-rust/libs/ngt/src/lib.rs
+rust/libs/algorithm/Cargo.toml
+rust/libs/algorithm/src/lib.rs
+rust/libs/algorithms/faiss/Cargo.toml
+rust/libs/algorithms/faiss/src/lib.rs
+rust/libs/algorithms/ngt/Cargo.toml
+rust/libs/algorithms/ngt/build.rs
+rust/libs/algorithms/ngt/src/input.cpp
+rust/libs/algorithms/ngt/src/input.h
+rust/libs/algorithms/ngt/src/lib.rs
+rust/libs/observability/Cargo.toml
+rust/libs/observability/src/config.rs
+rust/libs/observability/src/lib.rs
+rust/libs/observability/src/macros.rs
+rust/libs/observability/src/observability.rs
 rust/libs/proto/Cargo.toml
 rust/libs/proto/src/core.v1.tonic.rs
 rust/libs/proto/src/discoverer.v1.tonic.rs
@@ -1965,7 +1977,6 @@ versions/PROTOBUF_VERSION
 versions/REVIEWDOG_VERSION
 versions/RUST_VERSION
 versions/TELEPRESENCE_VERSION
-versions/VALDCLI_VERSION
 versions/VALD_VERSION
 versions/YQ_VERSION
 versions/ZLIB_VERSION
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index a6af52139d..709d8587ce 100755
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -23,10 +23,18 @@ assignees: ""
 - Vald Version: v1.7.13
+<<<<<<< HEAD
 - Go Version: v1.22.6
 - Rust Version: v1.80.0
 - Docker Version: v27.1.1
 - Kubernetes Version: v1.30.3
 - Helm Version: v3.15.3
+=======
+- Go Version: v1.23.1
+- Rust Version: v1.81.0
+- Docker Version: v27.2.1
+- Kubernetes Version: v1.31.0
+- Helm Version: v3.15.4
+>>>>>>> 3ba0002d4 (Add UpdateTimestamp API (#2605))
 - NGT Version: v2.2.4
 - Faiss Version: v1.8.0
diff --git a/.github/ISSUE_TEMPLATE/security_issue_report.md
b/.github/ISSUE_TEMPLATE/security_issue_report.md index 0b668c55a4..dc80010b0e 100644 --- a/.github/ISSUE_TEMPLATE/security_issue_report.md +++ b/.github/ISSUE_TEMPLATE/security_issue_report.md @@ -17,10 +17,18 @@ assignees: "" - Vald Version: v1.7.13 +<<<<<<< HEAD - Go Version: v1.22.6 - Rust Version: v1.80.0 - Docker Version: v27.1.1 - Kubernetes Version: v1.30.3 - Helm Version: v3.15.3 +======= +- Go Version: v1.23.1 +- Rust Version: v1.81.0 +- Docker Version: v27.2.1 +- Kubernetes Version: v1.31.0 +- Helm Version: v3.15.4 +>>>>>>> 3ba0002d4 (Add UpdateTimestamp API (#2605)) - NGT Version: v2.2.4 - Faiss Version: v1.8.0 diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index dbf641e9f8..3fbfd9b246 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -16,11 +16,19 @@ - Vald Version: v1.7.13 +<<<<<<< HEAD - Go Version: v1.22.6 - Rust Version: v1.80.0 - Docker Version: v27.1.1 - Kubernetes Version: v1.30.3 - Helm Version: v3.15.3 +======= +- Go Version: v1.23.1 +- Rust Version: v1.81.0 +- Docker Version: v27.2.1 +- Kubernetes Version: v1.31.0 +- Helm Version: v3.15.4 +>>>>>>> 3ba0002d4 (Add UpdateTimestamp API (#2605)) - NGT Version: v2.2.4 - Faiss Version: v1.8.0 diff --git a/.github/dependabot.yml b/.github/dependabot.yaml similarity index 100% rename from .github/dependabot.yml rename to .github/dependabot.yaml diff --git a/.github/helm/values/values-correction.yaml b/.github/helm/values/values-correction.yaml index 888931ca6f..0632c3d2f7 100644 --- a/.github/helm/values/values-correction.yaml +++ b/.github/helm/values/values-correction.yaml @@ -16,7 +16,7 @@ defaults: logging: - level: info + level: debug networkPolicy: enabled: true gateway: diff --git a/.github/labeler.yml b/.github/labeler.yaml similarity index 100% rename from .github/labeler.yml rename to .github/labeler.yaml diff --git a/.github/workflows/_detect-ci-container.yml b/.github/workflows/_detect-ci-container.yaml similarity index 100% rename from .github/workflows/_detect-ci-container.yml rename to .github/workflows/_detect-ci-container.yaml diff --git a/.github/workflows/_release-pr.yml b/.github/workflows/_release-pr.yaml similarity index 99% rename from .github/workflows/_release-pr.yml rename to .github/workflows/_release-pr.yaml index 161aa00e05..31dd60b417 100644 --- a/.github/workflows/_release-pr.yml +++ b/.github/workflows/_release-pr.yaml @@ -32,7 +32,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml secrets: inherit create: needs: diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yaml similarity index 100% rename from .github/workflows/backport.yml rename to .github/workflows/backport.yaml diff --git a/.github/workflows/build-binaries.yml b/.github/workflows/build-binaries.yaml similarity index 97% rename from .github/workflows/build-binaries.yml rename to .github/workflows/build-binaries.yaml index fb794482be..f87e9151fb 100644 --- a/.github/workflows/build-binaries.yml +++ b/.github/workflows/build-binaries.yaml @@ -25,7 +25,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml build-linux: runs-on: ubuntu-latest needs: [detect-ci-container] diff --git a/.github/workflows/build-protobuf.yml 
b/.github/workflows/build-protobuf.yaml similarity index 96% rename from .github/workflows/build-protobuf.yml rename to .github/workflows/build-protobuf.yaml index ce7bba459c..34b2d6e55e 100644 --- a/.github/workflows/build-protobuf.yml +++ b/.github/workflows/build-protobuf.yaml @@ -34,7 +34,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml build: runs-on: ubuntu-latest needs: [detect-ci-container] diff --git a/.github/workflows/chatops-help.yml b/.github/workflows/chatops-help.yaml similarity index 100% rename from .github/workflows/chatops-help.yml rename to .github/workflows/chatops-help.yaml diff --git a/.github/workflows/chatops.yml b/.github/workflows/chatops.yaml similarity index 100% rename from .github/workflows/chatops.yml rename to .github/workflows/chatops.yaml diff --git a/.github/workflows/check-conflict.yml b/.github/workflows/check-conflict.yaml similarity index 98% rename from .github/workflows/check-conflict.yml rename to .github/workflows/check-conflict.yaml index d2eda8436f..7cd764f0f2 100644 --- a/.github/workflows/check-conflict.yml +++ b/.github/workflows/check-conflict.yaml @@ -35,7 +35,7 @@ jobs: git config --global --add safe.directory ${GITHUB_WORKSPACE} - name: Check conflict run: | - if grep -r "<<<< HEAD" . --exclude-dir=.git --exclude=check-conflict.yml; then + if grep -r "<<<< HEAD" . --exclude-dir=.git --exclude=check-conflict.yaml; then PR_COMMENTS=`curl ${API_URL}?per_page=10000` BODY=`echo -E "${PR_COMMENTS}" | jq 'last(.[] | select(.user.login == "vdaas-ci") | select(.body | test("^\\\\*\\\\*\\\\[WARNING:CONFLICT")) | .body)' -r` diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yaml similarity index 95% rename from .github/workflows/codeql-analysis.yml rename to .github/workflows/codeql-analysis.yaml index afbc82eb24..648c3e977b 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yaml @@ -31,7 +31,7 @@ on: - "release/v*.*" - "!release/v*.*.*" paths: - - ".github/workflows/codeql-analysis.yml" + - ".github/workflows/codeql-analysis.yaml" - "**.go" schedule: - cron: "0 1 * * *" @@ -42,7 +42,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml codeql-build: name: CodeQL runs-on: ubuntu-latest diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yaml similarity index 93% rename from .github/workflows/coverage.yml rename to .github/workflows/coverage.yaml index 43263cf90a..eca2c95487 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yaml @@ -19,13 +19,13 @@ on: branches: - main paths: - - ".github/workflows/coverage.yml" + - ".github/workflows/coverage.yaml" - "internal/**" - "pkg/**" - "cmd/**" pull_request: paths: - - ".github/workflows/coverage.yml" + - ".github/workflows/coverage.yaml" - "internal/**" - "pkg/**" - "cmd/**" @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml coverage: runs-on: ubuntu-latest needs: [detect-ci-container] diff --git a/.github/workflows/detect-internal-config-changes.yml b/.github/workflows/detect-internal-config-changes.yaml similarity 
index 100% rename from .github/workflows/detect-internal-config-changes.yml rename to .github/workflows/detect-internal-config-changes.yaml diff --git a/.github/workflows/dockers-agent-faiss-image.yaml b/.github/workflows/dockers-agent-faiss-image.yaml index bda1be82ae..691dce26c6 100755 --- a/.github/workflows/dockers-agent-faiss-image.yaml +++ b/.github/workflows/dockers-agent-faiss-image.yaml @@ -14,66 +14,66 @@ # limitations under the License. # # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: agent-faiss' +name: "Build docker image: agent-faiss" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/agent/core/faiss/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-faiss-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/core/faiss/** - - pkg/agent/core/faiss/** - - pkg/agent/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - versions/FAISS_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/core/faiss/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-faiss-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/core/faiss/** + - pkg/agent/core/faiss/** + - pkg/agent/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - versions/FAISS_VERSION + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/agent/core/faiss/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-faiss-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/core/faiss/** - - pkg/agent/core/faiss/** - - pkg/agent/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - versions/FAISS_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/core/faiss/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-faiss-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/core/faiss/** + - pkg/agent/core/faiss/** + - pkg/agent/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - versions/FAISS_VERSION + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-agent-image.yaml b/.github/workflows/dockers-agent-image.yaml index d6665fe250..f73b44f46d 100755 --- a/.github/workflows/dockers-agent-image.yaml +++ b/.github/workflows/dockers-agent-image.yaml @@ -14,68 +14,68 @@ # limitations 
under the License. # # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: agent' +name: "Build docker image: agent" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/agent/core/agent/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/core/agent/** - - pkg/agent/core/agent/** - - apis/grpc/** - - apis/proto/** - - rust/Cargo.lock - - rust/Cargo.toml - - rust/bin/agent - - rust/libs/ngt-rs/** - - rust/libs/ngt/** - - rust/libs/proto/** - - versions/RUST_VERSION - - versions/FAISS_VERSION - - versions/NGT_VERSION - - versions/FAISS_VERSION - - versions/NGT_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/core/agent/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/core/agent/** + - pkg/agent/core/agent/** + - apis/grpc/** + - apis/proto/** + - rust/Cargo.lock + - rust/Cargo.toml + - rust/bin/agent + - rust/libs/ngt-rs/** + - rust/libs/ngt/** + - rust/libs/proto/** + - versions/RUST_VERSION + - versions/FAISS_VERSION + - versions/NGT_VERSION + - versions/FAISS_VERSION + - versions/NGT_VERSION + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/agent/core/agent/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/core/agent/** - - pkg/agent/core/agent/** - - apis/grpc/** - - apis/proto/** - - rust/Cargo.lock - - rust/Cargo.toml - - rust/bin/agent - - rust/libs/ngt-rs/** - - rust/libs/ngt/** - - rust/libs/proto/** - - versions/RUST_VERSION - - versions/FAISS_VERSION - - versions/NGT_VERSION - - versions/FAISS_VERSION - - versions/NGT_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/core/agent/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/core/agent/** + - pkg/agent/core/agent/** + - apis/grpc/** + - apis/proto/** + - rust/Cargo.lock + - rust/Cargo.toml + - rust/bin/agent + - rust/libs/ngt-rs/** + - rust/libs/ngt/** + - rust/libs/proto/** + - versions/RUST_VERSION + - versions/FAISS_VERSION + - versions/NGT_VERSION + - versions/FAISS_VERSION + - versions/NGT_VERSION + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-agent-ngt-image.yaml b/.github/workflows/dockers-agent-ngt-image.yaml index e2587cc568..84c3b8f862 100755 --- a/.github/workflows/dockers-agent-ngt-image.yaml +++ b/.github/workflows/dockers-agent-ngt-image.yaml @@ -14,64 +14,64 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: agent-ngt' +name: "Build docker image: agent-ngt" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/agent/core/ngt/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-ngt-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/core/ngt/** - - pkg/agent/core/ngt/** - - pkg/agent/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - versions/NGT_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/core/ngt/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-ngt-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/core/ngt/** + - pkg/agent/core/ngt/** + - pkg/agent/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - versions/NGT_VERSION + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/agent/core/ngt/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-ngt-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/core/ngt/** - - pkg/agent/core/ngt/** - - pkg/agent/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - versions/NGT_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/core/ngt/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-ngt-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/core/ngt/** + - pkg/agent/core/ngt/** + - pkg/agent/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - versions/NGT_VERSION + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-agent-sidecar-image.yaml b/.github/workflows/dockers-agent-sidecar-image.yaml index 78456cab05..497151aee0 100755 --- a/.github/workflows/dockers-agent-sidecar-image.yaml +++ b/.github/workflows/dockers-agent-sidecar-image.yaml @@ -14,64 +14,64 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: agent-sidecar' +name: "Build docker image: agent-sidecar" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/agent/sidecar/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-sidecar-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/sidecar/** - - pkg/agent/sidecar/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - internal/db/storage/blob/** - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/sidecar/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-sidecar-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/sidecar/** + - pkg/agent/sidecar/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - internal/db/storage/blob/** + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/agent/sidecar/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-agent-sidecar-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/agent/sidecar/** - - pkg/agent/sidecar/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - internal/db/storage/blob/** - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/agent/sidecar/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-agent-sidecar-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/agent/sidecar/** + - pkg/agent/sidecar/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - internal/db/storage/blob/** + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-benchmark-job-image.yaml b/.github/workflows/dockers-benchmark-job-image.yaml index 6ff59e9816..23028c31bf 100755 --- a/.github/workflows/dockers-benchmark-job-image.yaml +++ b/.github/workflows/dockers-benchmark-job-image.yaml @@ -14,64 +14,64 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: benchmark-job' +name: "Build docker image: benchmark-job" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/tools/benchmark/job/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-benchmark-job-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/tools/benchmark/job/** - - pkg/tools/benchmark/job/** - - cmd/tools/benchmark/operators/** - - pkg/tools/benchmark/operators/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/tools/benchmark/job/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-benchmark-job-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/tools/benchmark/job/** + - pkg/tools/benchmark/job/** + - cmd/tools/benchmark/operators/** + - pkg/tools/benchmark/operators/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/tools/benchmark/job/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-benchmark-job-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/tools/benchmark/job/** - - pkg/tools/benchmark/job/** - - cmd/tools/benchmark/operators/** - - pkg/tools/benchmark/operators/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/tools/benchmark/job/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-benchmark-job-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/tools/benchmark/job/** + - pkg/tools/benchmark/job/** + - cmd/tools/benchmark/operators/** + - pkg/tools/benchmark/operators/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-benchmark-operator-image.yaml b/.github/workflows/dockers-benchmark-operator-image.yaml index 31c0f0f341..764f505d76 100755 --- a/.github/workflows/dockers-benchmark-operator-image.yaml +++ b/.github/workflows/dockers-benchmark-operator-image.yaml @@ -14,64 +14,64 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: benchmark-operator' +name: "Build docker image: benchmark-operator" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/tools/benchmark/operator/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-benchmark-operator-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/tools/benchmark/operator/** - - pkg/tools/benchmark/operator/** - - cmd/tools/benchmark/jobs/** - - pkg/tools/benchmark/jobs/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/tools/benchmark/operator/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-benchmark-operator-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/tools/benchmark/operator/** + - pkg/tools/benchmark/operator/** + - cmd/tools/benchmark/jobs/** + - pkg/tools/benchmark/jobs/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/tools/benchmark/operator/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-benchmark-operator-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/tools/benchmark/operator/** - - pkg/tools/benchmark/operator/** - - cmd/tools/benchmark/jobs/** - - pkg/tools/benchmark/jobs/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/tools/benchmark/operator/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-benchmark-operator-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/tools/benchmark/operator/** + - pkg/tools/benchmark/operator/** + - cmd/tools/benchmark/jobs/** + - pkg/tools/benchmark/jobs/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-binfmt-image.yaml b/.github/workflows/dockers-binfmt-image.yaml index 4689f56333..497a188926 100755 --- a/.github/workflows/dockers-binfmt-image.yaml +++ b/.github/workflows/dockers-binfmt-image.yaml @@ -14,60 +14,60 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: binfmt' +name: "Build docker image: binfmt" on: schedule: - - cron: 0 * * * * + - cron: 0 * * * * push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/binfmt/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-binfmt-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/binfmt/** - - pkg/binfmt/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/binfmt/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-binfmt-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/binfmt/** + - pkg/binfmt/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/binfmt/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-binfmt-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/binfmt/** - - pkg/binfmt/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/binfmt/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-binfmt-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/binfmt/** + - pkg/binfmt/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-buildbase-image.yaml b/.github/workflows/dockers-buildbase-image.yaml index 5117c243fc..58b0a1ef83 100755 --- a/.github/workflows/dockers-buildbase-image.yaml +++ b/.github/workflows/dockers-buildbase-image.yaml @@ -14,60 +14,60 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: buildbase' +name: "Build docker image: buildbase" on: schedule: - - cron: 0 * * * * + - cron: 0 * * * * push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/buildbase/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-buildbase-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/buildbase/** - - pkg/buildbase/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/buildbase/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-buildbase-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/buildbase/** + - pkg/buildbase/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/buildbase/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-buildbase-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/buildbase/** - - pkg/buildbase/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/buildbase/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-buildbase-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/buildbase/** + - pkg/buildbase/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-buildkit-image.yaml b/.github/workflows/dockers-buildkit-image.yaml index df7d955c0f..1d6ceea7c4 100755 --- a/.github/workflows/dockers-buildkit-image.yaml +++ b/.github/workflows/dockers-buildkit-image.yaml @@ -14,60 +14,60 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: buildkit' +name: "Build docker image: buildkit" on: schedule: - - cron: 0 * * * * + - cron: 0 * * * * push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/buildkit/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-buildkit-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/buildkit/** - - pkg/buildkit/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/buildkit/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-buildkit-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/buildkit/** + - pkg/buildkit/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/buildkit/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-buildkit-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/buildkit/** - - pkg/buildkit/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/buildkit/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-buildkit-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/buildkit/** + - pkg/buildkit/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-buildkit-syft-scanner-image.yaml b/.github/workflows/dockers-buildkit-syft-scanner-image.yaml index b74ee0acfb..b475416305 100755 --- a/.github/workflows/dockers-buildkit-syft-scanner-image.yaml +++ b/.github/workflows/dockers-buildkit-syft-scanner-image.yaml @@ -14,60 +14,60 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: buildkit-syft-scanner' +name: "Build docker image: buildkit-syft-scanner" on: schedule: - - cron: 0 * * * * + - cron: 0 * * * * push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/buildkit/syft/scanner/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-buildkit-syft-scanner-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/buildkit/syft/scanner/** - - pkg/buildkit/syft/scanner/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/buildkit/syft/scanner/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-buildkit-syft-scanner-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/buildkit/syft/scanner/** + - pkg/buildkit/syft/scanner/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/buildkit/syft/scanner/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-buildkit-syft-scanner-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/buildkit/syft/scanner/** - - pkg/buildkit/syft/scanner/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' + - hack/docker/gen/main.go + - dockers/buildkit/syft/scanner/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-buildkit-syft-scanner-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/buildkit/syft/scanner/** + - pkg/buildkit/syft/scanner/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-ci-container-image.yaml b/.github/workflows/dockers-ci-container-image.yaml index fa6ffb3943..1e3e501f7a 100755 --- a/.github/workflows/dockers-ci-container-image.yaml +++ b/.github/workflows/dockers-ci-container-image.yaml @@ -14,48 +14,48 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: ci-container' +name: "Build docker image: ci-container" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/ci/base/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-ci-container-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/ci/base/** - - pkg/ci/base/** - - apis/grpc/** - - apis/proto/** - - hack/** - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/ci/base/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-ci-container-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/ci/base/** + - pkg/ci/base/** + - apis/grpc/** + - apis/proto/** + - hack/** + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/ci/base/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-ci-container-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/ci/base/** - - pkg/ci/base/** - - apis/grpc/** - - apis/proto/** - - hack/** - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/ci/base/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-ci-container-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/ci/base/** + - pkg/ci/base/** + - apis/grpc/** + - apis/proto/** + - hack/** + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-dev-container-image.yaml b/.github/workflows/dockers-dev-container-image.yaml index ee83a872c4..163134d95e 100755 --- a/.github/workflows/dockers-dev-container-image.yaml +++ b/.github/workflows/dockers-dev-container-image.yaml @@ -14,48 +14,48 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: dev-container' +name: "Build docker image: dev-container" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/dev/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-dev-container-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/dev/** - - pkg/dev/** - - apis/grpc/** - - apis/proto/** - - hack/** - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/dev/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-dev-container-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/dev/** + - pkg/dev/** + - apis/grpc/** + - apis/proto/** + - hack/** + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/dev/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-dev-container-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/dev/** - - pkg/dev/** - - apis/grpc/** - - apis/proto/** - - hack/** - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/dev/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-dev-container-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/dev/** + - pkg/dev/** + - apis/grpc/** + - apis/proto/** + - hack/** + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-discoverer-k8s-image.yaml b/.github/workflows/dockers-discoverer-k8s-image.yaml index e0fea36ae9..a840e36cfd 100755 --- a/.github/workflows/dockers-discoverer-k8s-image.yaml +++ b/.github/workflows/dockers-discoverer-k8s-image.yaml @@ -14,60 +14,60 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: discoverer-k8s' +name: "Build docker image: discoverer-k8s" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/discoverer/k8s/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-discoverer-k8s-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/discoverer/k8s/** - - pkg/discoverer/k8s/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/discoverer/k8s/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-discoverer-k8s-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/discoverer/k8s/** + - pkg/discoverer/k8s/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/discoverer/k8s/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-discoverer-k8s-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/discoverer/k8s/** - - pkg/discoverer/k8s/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/discoverer/k8s/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-discoverer-k8s-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/discoverer/k8s/** + - pkg/discoverer/k8s/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-gateway-filter-image.yaml b/.github/workflows/dockers-gateway-filter-image.yaml index 185821dfd2..6d649d3c4a 100755 --- a/.github/workflows/dockers-gateway-filter-image.yaml +++ b/.github/workflows/dockers-gateway-filter-image.yaml @@ -14,64 +14,64 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: gateway-filter' +name: "Build docker image: gateway-filter" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/gateway/filter/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-gateway-filter-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/gateway/filter/** - - pkg/gateway/filter/** - - pkg/gateway/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/gateway/filter/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-gateway-filter-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/gateway/filter/** + - pkg/gateway/filter/** + - pkg/gateway/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/gateway/filter/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-gateway-filter-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/gateway/filter/** - - pkg/gateway/filter/** - - pkg/gateway/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/gateway/filter/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-gateway-filter-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/gateway/filter/** + - pkg/gateway/filter/** + - pkg/gateway/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-gateway-lb-image.yaml b/.github/workflows/dockers-gateway-lb-image.yaml index ffbe94fd42..f61792f67a 100755 --- a/.github/workflows/dockers-gateway-lb-image.yaml +++ b/.github/workflows/dockers-gateway-lb-image.yaml @@ -14,64 +14,64 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: gateway-lb' +name: "Build docker image: gateway-lb" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/gateway/lb/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-gateway-lb-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/gateway/lb/** - - pkg/gateway/lb/** - - pkg/gateway/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/gateway/lb/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-gateway-lb-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/gateway/lb/** + - pkg/gateway/lb/** + - pkg/gateway/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/gateway/lb/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-gateway-lb-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/gateway/lb/** - - pkg/gateway/lb/** - - pkg/gateway/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/gateway/lb/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-gateway-lb-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/gateway/lb/** + - pkg/gateway/lb/** + - pkg/gateway/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-gateway-mirror-image.yaml b/.github/workflows/dockers-gateway-mirror-image.yaml index 29e2f55f35..e31d0dcd98 100755 --- a/.github/workflows/dockers-gateway-mirror-image.yaml +++ b/.github/workflows/dockers-gateway-mirror-image.yaml @@ -14,62 +14,62 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: gateway-mirror' +name: "Build docker image: gateway-mirror" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/gateway/mirror/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-gateway-mirror-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/gateway/mirror/** - - pkg/gateway/mirror/** - - pkg/gateway/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/gateway/mirror/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-gateway-mirror-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/gateway/mirror/** + - pkg/gateway/mirror/** + - pkg/gateway/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/gateway/mirror/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-gateway-mirror-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/gateway/mirror/** - - pkg/gateway/mirror/** - - pkg/gateway/internal/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/gateway/mirror/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-gateway-mirror-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/gateway/mirror/** + - pkg/gateway/mirror/** + - pkg/gateway/internal/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-helm-operator-image.yaml b/.github/workflows/dockers-helm-operator-image.yaml index ec1e04537a..d721fe37f5 100755 --- a/.github/workflows/dockers-helm-operator-image.yaml +++ b/.github/workflows/dockers-helm-operator-image.yaml @@ -14,56 +14,56 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: helm-operator' +name: "Build docker image: helm-operator" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/operator/helm/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-helm-operator-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/operator/helm/** - - pkg/operator/helm/** - - charts/vald/Chart.yaml - - charts/vald/values.yaml - - charts/vald/templates/** - - charts/vald-helm-operator/Chart.yaml - - charts/vald-helm-operator/values.yaml - - charts/vald-helm-operator/templates/** - - versions/OPERATOR_SDK_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/operator/helm/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-helm-operator-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/operator/helm/** + - pkg/operator/helm/** + - charts/vald/Chart.yaml + - charts/vald/values.yaml + - charts/vald/templates/** + - charts/vald-helm-operator/Chart.yaml + - charts/vald-helm-operator/values.yaml + - charts/vald-helm-operator/templates/** + - versions/OPERATOR_SDK_VERSION + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/operator/helm/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-helm-operator-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/operator/helm/** - - pkg/operator/helm/** - - charts/vald/Chart.yaml - - charts/vald/values.yaml - - charts/vald/templates/** - - charts/vald-helm-operator/Chart.yaml - - charts/vald-helm-operator/values.yaml - - charts/vald-helm-operator/templates/** - - versions/OPERATOR_SDK_VERSION - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/operator/helm/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-helm-operator-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/operator/helm/** + - pkg/operator/helm/** + - charts/vald/Chart.yaml + - charts/vald/values.yaml + - charts/vald/templates/** + - charts/vald-helm-operator/Chart.yaml + - charts/vald-helm-operator/values.yaml + - charts/vald-helm-operator/templates/** + - versions/OPERATOR_SDK_VERSION + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-image-scan.yml b/.github/workflows/dockers-image-scan.yaml similarity index 100% rename from .github/workflows/dockers-image-scan.yml rename to .github/workflows/dockers-image-scan.yaml diff --git a/.github/workflows/dockers-index-correction-image.yaml b/.github/workflows/dockers-index-correction-image.yaml index ed4060a8c6..6c9306740c 100755 --- a/.github/workflows/dockers-index-correction-image.yaml +++ b/.github/workflows/dockers-index-correction-image.yaml @@ -14,62 +14,62 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: index-correction' +name: "Build docker image: index-correction" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/index/job/correction/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-correction-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/correction/** - - pkg/index/job/correction/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/correction/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-correction-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/correction/** + - pkg/index/job/correction/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/index/job/correction/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-correction-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/correction/** - - pkg/index/job/correction/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/correction/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-correction-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/correction/** + - pkg/index/job/correction/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-index-creation-image.yaml b/.github/workflows/dockers-index-creation-image.yaml index 1d17b7d10f..b79aaa9e87 100755 --- a/.github/workflows/dockers-index-creation-image.yaml +++ b/.github/workflows/dockers-index-creation-image.yaml @@ -14,62 +14,62 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: index-creation' +name: "Build docker image: index-creation" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/index/job/creation/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-creation-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/creation/** - - pkg/index/job/creation/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/creation/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-creation-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/creation/** + - pkg/index/job/creation/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/index/job/creation/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-creation-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/creation/** - - pkg/index/job/creation/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/creation/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-creation-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/creation/** + - pkg/index/job/creation/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-index-operator-image.yaml b/.github/workflows/dockers-index-operator-image.yaml index f83b0f6c86..1287ac02d8 100755 --- a/.github/workflows/dockers-index-operator-image.yaml +++ b/.github/workflows/dockers-index-operator-image.yaml @@ -14,60 +14,60 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: index-operator' +name: "Build docker image: index-operator" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/index/operator/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-operator-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/operator/** - - pkg/index/operator/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/operator/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-operator-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/operator/** + - pkg/index/operator/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/index/operator/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-operator-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/operator/** - - pkg/index/operator/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/operator/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-operator-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/operator/** + - pkg/index/operator/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-index-save-image.yaml b/.github/workflows/dockers-index-save-image.yaml index 568fbcb0b6..c1b6c838f3 100755 --- a/.github/workflows/dockers-index-save-image.yaml +++ b/.github/workflows/dockers-index-save-image.yaml @@ -14,62 +14,62 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: index-save' +name: "Build docker image: index-save" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/index/job/save/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-save-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/save/** - - pkg/index/job/save/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/save/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-save-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/save/** + - pkg/index/job/save/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/index/job/save/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-index-save-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/save/** - - pkg/index/job/save/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/save/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-index-save-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/save/** + - pkg/index/job/save/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-loadtest-image.yaml b/.github/workflows/dockers-loadtest-image.yaml index 6e561548bd..6bb824e9c0 100755 --- a/.github/workflows/dockers-loadtest-image.yaml +++ b/.github/workflows/dockers-loadtest-image.yaml @@ -14,62 +14,62 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: loadtest' +name: "Build docker image: loadtest" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/tools/cli/loadtest/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-loadtest-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/tools/cli/loadtest/** - - pkg/tools/cli/loadtest/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/tools/cli/loadtest/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-loadtest-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/tools/cli/loadtest/** + - pkg/tools/cli/loadtest/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/tools/cli/loadtest/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-loadtest-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/tools/cli/loadtest/** - - pkg/tools/cli/loadtest/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/tools/cli/loadtest/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-loadtest-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/tools/cli/loadtest/** + - pkg/tools/cli/loadtest/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-manager-index-image.yaml b/.github/workflows/dockers-manager-index-image.yaml index d7a22972b7..71cde3006c 100755 --- a/.github/workflows/dockers-manager-index-image.yaml +++ b/.github/workflows/dockers-manager-index-image.yaml @@ -14,62 +14,62 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: manager-index' +name: "Build docker image: manager-index" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/manager/index/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-manager-index-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/manager/index/** - - pkg/manager/index/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/manager/index/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-manager-index-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/manager/index/** + - pkg/manager/index/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/manager/index/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-manager-index-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/manager/index/** - - pkg/manager/index/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - '!internal/k8s/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/manager/index/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-manager-index-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/manager/index/** + - pkg/manager/index/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - "!internal/k8s/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-readreplica-rotate-image.yaml b/.github/workflows/dockers-readreplica-rotate-image.yaml index fd18858755..8d1b815991 100755 --- a/.github/workflows/dockers-readreplica-rotate-image.yaml +++ b/.github/workflows/dockers-readreplica-rotate-image.yaml @@ -14,60 +14,60 @@ # limitations under the License. 
# # DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/actions/gen/main.go -name: 'Build docker image: readreplica-rotate' +name: "Build docker image: readreplica-rotate" on: push: branches: - - main - - release/v*.* - - '!release/v*.*.*' + - main + - release/v*.* + - "!release/v*.*.*" tags: - - '*.*.*' - - v*.*.* - - '*.*.*-*' - - v*.*.*-* + - "*.*.*" + - v*.*.* + - "*.*.*-*" + - v*.*.*-* pull_request: paths: - - hack/docker/gen/main.go - - dockers/index/job/readreplica/rotate/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-readreplica-rotate-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/readreplica/rotate/** - - pkg/index/job/readreplica/rotate/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/readreplica/rotate/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-readreplica-rotate-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/readreplica/rotate/** + - pkg/index/job/readreplica/rotate/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** pull_request_target: paths: - - hack/docker/gen/main.go - - dockers/index/job/readreplica/rotate/Dockerfile - - hack/actions/gen/main.go - - .github/workflows/dockers-readreplica-rotate-image.yaml - - .github/actions/docker-build/action.yaml - - .github/workflows/_docker-image.yaml - - cmd/index/job/readreplica/rotate/** - - pkg/index/job/readreplica/rotate/** - - apis/grpc/** - - apis/proto/** - - go.mod - - go.sum - - versions/GO_VERSION - - internal/** - - '!internal/**/*_test.go' - - '!internal/**/*_mock.go' - - '!internal/db/**' - - Makefile - - Makefile.d/** + - hack/docker/gen/main.go + - dockers/index/job/readreplica/rotate/Dockerfile + - hack/actions/gen/main.go + - .github/workflows/dockers-readreplica-rotate-image.yaml + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - cmd/index/job/readreplica/rotate/** + - pkg/index/job/readreplica/rotate/** + - apis/grpc/** + - apis/proto/** + - go.mod + - go.sum + - versions/GO_VERSION + - internal/** + - "!internal/**/*_test.go" + - "!internal/**/*_mock.go" + - "!internal/db/**" + - Makefile + - Makefile.d/** jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/e2e-chaos.yaml b/.github/workflows/e2e-chaos.yaml index 473e4136cc..b7ce8ac4da 100644 --- a/.github/workflows/e2e-chaos.yaml +++ b/.github/workflows/e2e-chaos.yaml @@ -36,7 +36,7 @@ jobs: - uses: ./.github/actions/dump-context detect-ci-container: if: startsWith( github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-chaos' - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml agent-failure: name: "E2E chaos test (Agent failure: to test insert/search works even if one of the agents is failing)" needs: [detect-ci-container] diff --git a/.github/workflows/e2e-code-bench-agent.yaml b/.github/workflows/e2e-code-bench-agent.yaml index f6bfe972ce..a1cdad029d 100644 --- 
a/.github/workflows/e2e-code-bench-agent.yaml +++ b/.github/workflows/e2e-code-bench-agent.yaml @@ -19,7 +19,7 @@ on: branches: - main paths: - - ".github/workflows/e2e-bench-agent.yml" + - ".github/workflows/e2e-bench-agent.yaml" - "internal/core/**" - "internal/client/**" - "internal/net/**" @@ -31,7 +31,7 @@ on: - "versions/NGT_VERSION" pull_request: paths: - - ".github/workflows/e2e-bench-agent.yml" + - ".github/workflows/e2e-bench-agent.yaml" - "internal/core/**" - "internal/client/**" - "internal/net/**" @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml grpc-sequential: name: grpc-sequential runs-on: ubuntu-latest diff --git a/.github/workflows/e2e-max-dim.yml b/.github/workflows/e2e-max-dim.yaml similarity index 98% rename from .github/workflows/e2e-max-dim.yml rename to .github/workflows/e2e-max-dim.yaml index 4d5476b3fb..e01ce44855 100644 --- a/.github/workflows/e2e-max-dim.yml +++ b/.github/workflows/e2e-max-dim.yaml @@ -33,7 +33,7 @@ jobs: - uses: ./.github/actions/dump-context detect-ci-container: if: startsWith( github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-max-dim' - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml e2e-max-dimension-insert: name: "E2E test (Max Dimension Insert: skip strict exist check)" needs: [detect-ci-container] diff --git a/.github/workflows/e2e-profiling.yml b/.github/workflows/e2e-profiling.yaml similarity index 99% rename from .github/workflows/e2e-profiling.yml rename to .github/workflows/e2e-profiling.yaml index 1e285e1343..30a034d76b 100644 --- a/.github/workflows/e2e-profiling.yml +++ b/.github/workflows/e2e-profiling.yaml @@ -34,7 +34,7 @@ jobs: - uses: ./.github/actions/dump-context detect-ci-container: if: startsWith(github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-profiling' - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml e2e-profiling: name: "E2E profiling" needs: [detect-ci-container] diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yaml similarity index 99% rename from .github/workflows/e2e.yml rename to .github/workflows/e2e.yaml index 14b20bdb24..a808894923 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yaml @@ -34,7 +34,7 @@ jobs: - uses: ./.github/actions/dump-context detect-ci-container: if: startsWith( github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-deploy' - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml e2e-stream-crud: name: "E2E test (Stream CRUD)" needs: [detect-ci-container] diff --git a/.github/workflows/format.yml b/.github/workflows/format.yaml similarity index 98% rename from .github/workflows/format.yml rename to .github/workflows/format.yaml index 684e04873f..846b523b42 100644 --- a/.github/workflows/format.yml +++ b/.github/workflows/format.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml format: if: ${{ github.event_name == 'push' }} runs-on: ubuntu-latest diff --git a/.github/workflows/fossa.yml 
b/.github/workflows/fossa.yaml similarity index 98% rename from .github/workflows/fossa.yml rename to .github/workflows/fossa.yaml index 7b8f86140c..2e35b74abc 100644 --- a/.github/workflows/fossa.yml +++ b/.github/workflows/fossa.yaml @@ -29,7 +29,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml fossa-scan: name: "trigger FOSSA scan" runs-on: ubuntu-latest diff --git a/.github/workflows/helm-lint.yml b/.github/workflows/helm-lint.yaml similarity index 98% rename from .github/workflows/helm-lint.yml rename to .github/workflows/helm-lint.yaml index dce2c6b007..2d82d6c653 100644 --- a/.github/workflows/helm-lint.yml +++ b/.github/workflows/helm-lint.yaml @@ -25,7 +25,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml lint-vald-chart: name: lint for vald chart runs-on: ubuntu-latest diff --git a/.github/workflows/helm.yml b/.github/workflows/helm.yaml similarity index 98% rename from .github/workflows/helm.yml rename to .github/workflows/helm.yaml index 4539fdbfbb..970fb2a209 100644 --- a/.github/workflows/helm.yml +++ b/.github/workflows/helm.yaml @@ -28,7 +28,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml update-helm-chart: name: Update Helm chart runs-on: ubuntu-latest diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yaml similarity index 99% rename from .github/workflows/labeler.yml rename to .github/workflows/labeler.yaml index d18aedf1a4..f8c87db699 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yaml @@ -37,6 +37,7 @@ jobs: - uses: actions/labeler@v5 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" + configuration-path: ".github/labeler.yaml" - name: Add labels run: | pr_num=`cat $GITHUB_EVENT_PATH | jq -r ".number"` diff --git a/.github/workflows/release.yml b/.github/workflows/release.yaml similarity index 100% rename from .github/workflows/release.yml rename to .github/workflows/release.yaml diff --git a/.github/workflows/reviewdog-hadolint.yml b/.github/workflows/reviewdog-hadolint.yaml similarity index 100% rename from .github/workflows/reviewdog-hadolint.yml rename to .github/workflows/reviewdog-hadolint.yaml diff --git a/.github/workflows/reviewdog-k8s.yml b/.github/workflows/reviewdog-k8s.yaml similarity index 98% rename from .github/workflows/reviewdog-k8s.yml rename to .github/workflows/reviewdog-k8s.yaml index f0e55b340d..5fa7456861 100644 --- a/.github/workflows/reviewdog-k8s.yml +++ b/.github/workflows/reviewdog-k8s.yaml @@ -27,7 +27,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml kubeval-conflint: name: runner / kubeval-conflint runs-on: ubuntu-latest diff --git a/.github/workflows/reviewdog-markdown.yml b/.github/workflows/reviewdog-markdown.yaml similarity index 100% rename from .github/workflows/reviewdog-markdown.yml rename to .github/workflows/reviewdog-markdown.yaml diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yaml similarity index 94% rename from 
.github/workflows/reviewdog.yml rename to .github/workflows/reviewdog.yaml index 16261386f8..3b15ce2006 100644 --- a/.github/workflows/reviewdog.yml +++ b/.github/workflows/reviewdog.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml golangci-lint: name: runner / golangci-lint runs-on: ubuntu-latest @@ -40,7 +40,7 @@ jobs: git config --global --add safe.directory ${GITHUB_WORKSPACE} - name: Run golangci-lint run: | - golangci-lint run --config .golangci.yml \ + golangci-lint run --config .golangci.yaml \ | reviewdog -f=golangci-lint -name=golangci -reporter=${REPORTER} -level=${LEVEL} env: REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/semver-major-minor.yaml b/.github/workflows/semver-major-minor.yaml index ebe710e4d2..755af82792 100644 --- a/.github/workflows/semver-major-minor.yaml +++ b/.github/workflows/semver-major-minor.yaml @@ -95,7 +95,7 @@ jobs: if: ${{ needs.semver-auto.outputs.RELEASE == 'true' }} needs: - semver-auto - uses: ./.github/workflows/_release-pr.yml + uses: ./.github/workflows/_release-pr.yaml with: release_branch_name: ${{ needs.semver-auto.outputs.RELEASE_BRANCH_NAME }} release_tag: ${{ needs.semver-auto.outputs.RELEASE_TAG }} diff --git a/.github/workflows/semver-patch.yaml b/.github/workflows/semver-patch.yaml index 50311be3a0..86d6d67a4e 100644 --- a/.github/workflows/semver-patch.yaml +++ b/.github/workflows/semver-patch.yaml @@ -71,7 +71,7 @@ jobs: if: ${{ needs.semver-auto.outputs.RELEASE == 'true' }} needs: - semver-auto - uses: ./.github/workflows/_release-pr.yml + uses: ./.github/workflows/_release-pr.yaml with: release_branch_name: "release/${{ needs.semver-auto.outputs.RELEASE_BRANCH_NAME_SUFFIX }}" release_tag: ${{ needs.semver-auto.outputs.RELEASE_TAG }} diff --git a/.github/workflows/test-hack.yml b/.github/workflows/test-hack.yaml similarity index 94% rename from .github/workflows/test-hack.yml rename to .github/workflows/test-hack.yaml index fbe9ff39e0..040ffb0ed5 100644 --- a/.github/workflows/test-hack.yml +++ b/.github/workflows/test-hack.yaml @@ -23,7 +23,7 @@ on: paths: - "go.mod" - "go.sum" - - ".github/workflows/test-hack.yml" + - ".github/workflows/test-hack.yaml" - "hack/gorules/**" - "hack/helm/**" - "hack/license/**" @@ -32,7 +32,7 @@ on: paths: - "go.mod" - "go.sum" - - ".github/workflows/test-hack.yml" + - ".github/workflows/test-hack.yaml" - "hack/gorules/**" - "hack/helm/**" - "hack/license/**" @@ -48,7 +48,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml test-hack: name: Run tests for hack packages runs-on: ubuntu-latest diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml index 34df7527ef..df49b275e9 100644 --- a/.github/workflows/unit-test.yaml +++ b/.github/workflows/unit-test.yaml @@ -23,7 +23,7 @@ on: paths: - "go.mod" - "go.sum" - - ".github/workflows/test.yml" + - ".github/workflows/test.yaml" - "internal/**" - "pkg/**" - "cmd/**" @@ -31,7 +31,7 @@ on: paths: - "go.mod" - "go.sum" - - ".github/workflows/test.yml" + - ".github/workflows/test.yaml" - "internal/**" - "pkg/**" - "cmd/**" @@ -42,7 +42,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: 
./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml test-cmd: name: Run tests for cmd packages runs-on: ubuntu-latest diff --git a/.github/workflows/update-actions.yaml b/.github/workflows/update-actions.yaml index 4a0c3cfc14..8500981889 100644 --- a/.github/workflows/update-actions.yaml +++ b/.github/workflows/update-actions.yaml @@ -52,7 +52,7 @@ jobs: fi - name: Create PR if: ${{ steps.check_diff.outputs.HAS_GIT_DIFF == 'true' }} - uses: peter-evans/create-pull-request@v6 + uses: peter-evans/create-pull-request@v7 with: author: "${{ secrets.DISPATCH_USER }} " token: ${{ secrets.DISPATCH_TOKEN }} diff --git a/.github/workflows/update-pull-request-and-issue-template.yml b/.github/workflows/update-pull-request-and-issue-template.yaml similarity index 100% rename from .github/workflows/update-pull-request-and-issue-template.yml rename to .github/workflows/update-pull-request-and-issue-template.yaml diff --git a/.github/workflows/update-web-docs.yml b/.github/workflows/update-web-docs.yaml similarity index 100% rename from .github/workflows/update-web-docs.yml rename to .github/workflows/update-web-docs.yaml diff --git a/Makefile b/Makefile index 42b9302bfc..1630d03f2b 100644 --- a/Makefile +++ b/Makefile @@ -103,7 +103,6 @@ PROTOBUF_VERSION := $(eval PROTOBUF_VERSION := $(shell cat versions/PRO REVIEWDOG_VERSION := $(eval REVIEWDOG_VERSION := $(shell cat versions/REVIEWDOG_VERSION))$(REVIEWDOG_VERSION) RUST_VERSION := $(eval RUST_VERSION := $(shell cat versions/RUST_VERSION))$(RUST_VERSION) TELEPRESENCE_VERSION := $(eval TELEPRESENCE_VERSION := $(shell cat versions/TELEPRESENCE_VERSION))$(TELEPRESENCE_VERSION) -VALDCLI_VERSION := $(eval VALDCLI_VERSION := $(shell cat versions/VALDCLI_VERSION))$(VALDCLI_VERSION) YQ_VERSION := $(eval YQ_VERSION := $(shell cat versions/YQ_VERSION))$(YQ_VERSION) ZLIB_VERSION := $(eval ZLIB_VERSION := $(shell cat versions/ZLIB_VERSION))$(ZLIB_VERSION) @@ -460,7 +459,6 @@ init: \ tools/install: \ helm/install \ kind/install \ - valdcli/install \ telepresence/install \ textlint/install @@ -625,10 +623,6 @@ version/helm: version/yq: @echo $(YQ_VERSION) -.PHONY: version/valdcli -version/valdcli: - @echo $(VALDCLI_VERSION) - .PHONY: version/telepresence version/telepresence: @echo $(TELEPRESENCE_VERSION) @@ -742,14 +736,14 @@ files/textlint: \ ## run cspell for document docs/cspell:\ cspell/install - cspell-cli $(ROOTDIR)/docs/**/*.md --show-suggestions $(CSPELL_EXTRA_OPTIONS) + cspell $(ROOTDIR)/docs/**/*.md --show-suggestions $(CSPELL_EXTRA_OPTIONS) .PHONY: files/cspell ## run cspell for document files/cspell: \ files \ cspell/install - cspell-cli $(ROOTDIR)/.gitfiles --show-suggestions $(CSPELL_EXTRA_OPTIONS) + cspell $(ROOTDIR)/.gitfiles --show-suggestions $(CSPELL_EXTRA_OPTIONS) .PHONY: changelog/update ## update changelog @@ -771,7 +765,6 @@ changelog/next/print: include Makefile.d/actions.mk include Makefile.d/bench.mk include Makefile.d/build.mk -include Makefile.d/client.mk include Makefile.d/dependencies.mk include Makefile.d/docker.mk include Makefile.d/e2e.mk @@ -780,7 +773,7 @@ include Makefile.d/helm.mk include Makefile.d/k3d.mk include Makefile.d/k8s.mk include Makefile.d/kind.mk +include Makefile.d/minikube.mk include Makefile.d/proto.mk include Makefile.d/test.mk include Makefile.d/tools.mk -include Makefile.d/minikube.mk diff --git a/Makefile.d/client.mk b/Makefile.d/client.mk deleted file mode 100644 index 1c5c7aa9bf..0000000000 --- a/Makefile.d/client.mk +++ /dev/null @@ -1,45 +0,0 @@ -# -# Copyright 
(C) 2019-2024 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -.PHONY: valdcli/install -## install valdcli -valdcli/install: $(BINDIR)/valdcli - -ifeq ($(UNAME),Darwin) -$(BINDIR)/valdcli: - mkdir -p $(BINDIR) - curl -fsSLO https://github.com/rinx/vald-client-clj/releases/download/$(VALDCLI_VERSION)/valdcli-macos.zip - unzip valdcli-macos.zip - rm -f valdcli-macos.zip - mv valdcli $(BINDIR)/valdcli -else -$(BINDIR)/valdcli: - mkdir -p $(BINDIR) - curl -fsSLO https://github.com/rinx/vald-client-clj/releases/download/$(VALDCLI_VERSION)/valdcli-linux-static.zip - unzip valdcli-linux-static.zip - rm -f valdcli-linux-static.zip - mv valdcli $(BINDIR)/valdcli -endif - -.PHONY: valdcli/xpanes/insert -## insert randomized vectors using valdcli and xpanes -valdcli/xpanes/insert: - xpanes -c "valdcli rand-vecs -n $(NUMBER) -d $(DIMENSION) --gaussian --gaussian-mean $(MEAN) --gaussian-stddev $(STDDEV) --with-ids | valdcli -h $(HOST) -p $(PORT) stream-insert --elapsed-time" $$(seq 1 $(NUMPANES)) - -.PHONY: valdcli/xpanes/search -## search randomized vectors using valdcli and xpanes -valdcli/xpanes/search: - xpanes -c "valdcli rand-vecs -n $(NUMBER) -d $(DIMENSION) --gaussian --gaussian-mean $(MEAN) --gaussian-stddev $(STDDEV) | valdcli -h $(HOST) -p $(PORT) stream-search --elapsed-time" $$(seq 1 $(NUMPANES)) diff --git a/Makefile.d/dependencies.mk b/Makefile.d/dependencies.mk index 928ed8c9d2..596e9ec1b2 100644 --- a/Makefile.d/dependencies.mk +++ b/Makefile.d/dependencies.mk @@ -39,7 +39,6 @@ update/libs: \ update/rust \ update/telepresence \ update/vald \ - update/valdcli \ update/yq \ update/zlib @@ -227,11 +226,6 @@ update/hdf5: update/vald: curl -fsSL https://api.github.com/repos/$(REPO)/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' > $(ROOTDIR)/versions/VALD_VERSION -.PHONY: update/valdcli -## update vald client library made by clojure self version -update/valdcli: - curl -fsSL https://api.github.com/repos/$(REPO)-client-clj/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' > $(ROOTDIR)/versions/VALDCLI_VERSION - .PHONY: update/template ## update PULL_REQUEST_TEMPLATE and ISSUE_TEMPLATE update/template: diff --git a/Makefile.d/docker.mk b/Makefile.d/docker.mk index 9a7ddd8a8a..b653757ffa 100644 --- a/Makefile.d/docker.mk +++ b/Makefile.d/docker.mk @@ -41,6 +41,33 @@ docker/build: \ docker/build/helm-operator \ docker/build/readreplica-rotate +docker/xpanes/build: + @xpanes -s -c "make -f $(ROOTDIR)/Makefile {}" \ + docker/build/agent \ + docker/build/agent-faiss \ + docker/build/agent-ngt \ + docker/build/agent-sidecar \ + docker/build/benchmark-job \ + docker/build/benchmark-operator \ + docker/build/binfmt \ + docker/build/buildbase \ + docker/build/buildkit \ + docker/build/buildkit-syft-scanner \ + docker/build/ci-container \ + docker/build/dev-container \ + docker/build/discoverer-k8s \ + docker/build/gateway-filter \ + docker/build/gateway-lb \ + docker/build/gateway-mirror \ + docker/build/index-correction \ + docker/build/index-creation \ + 
docker/build/index-operator \ + docker/build/index-save \ + docker/build/loadtest \ + docker/build/manager-index \ + docker/build/operator/helm \ + docker/build/readreplica-rotate + .PHONY: docker/name/org docker/name/org: @echo "$(ORG)" diff --git a/Makefile.d/e2e.mk b/Makefile.d/e2e.mk index 498b2ae9ca..d9502708c1 100644 --- a/Makefile.d/e2e.mk +++ b/Makefile.d/e2e.mk @@ -108,7 +108,7 @@ e2e/actions/run/stream/crud: \ sleep 2 kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) - make k8s/vald/deploy \ + $(MAKE) k8s/vald/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-lb.yaml sleep 3 kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) @@ -116,8 +116,8 @@ e2e/actions/run/stream/crud: \ kubectl get pods pod_name=$$(kubectl get pods --selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ echo $$pod_name; \ - make E2E_TARGET_POD_NAME=$$pod_name e2e - make k8s/vald/delete + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e + $(MAKE) k8s/vald/delete $(MAKE) k3d/delete .PHONY: e2e/actions/run/job @@ -129,7 +129,7 @@ e2e/actions/run/job: \ sleep 2 kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) - make k8s/vald/deploy \ + $(MAKE) k8s/vald/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-correction.yaml sleep 3 kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) @@ -137,8 +137,8 @@ e2e/actions/run/job: \ kubectl get pods pod_name=$$(kubectl get pods --selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ echo $$pod_name; \ - make E2E_TARGET_POD_NAME=$$pod_name e2e/index/job/correction - make k8s/vald/delete + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e/index/job/correction + $(MAKE) k8s/vald/delete $(MAKE) k3d/delete .PHONY: e2e/actions/run/readreplica @@ -151,12 +151,12 @@ e2e/actions/run/readreplica: \ kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) sleep 3 - make k8s/vald/deploy \ + $(MAKE) k8s/vald/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-readreplica.yaml sleep 20 kubectl wait --for=condition=Ready pod -l "app=$(AGENT_NGT_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait --for=condition=ContainersReady pod -l "app=$(AGENT_NGT_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) - make k8s/vald-readreplica/deploy \ + $(MAKE) k8s/vald-readreplica/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-readreplica.yaml sleep 3 kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) @@ -164,6 +164,29 @@ e2e/actions/run/readreplica: \ kubectl get pods pod_name=$$(kubectl get pods --selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ echo $$pod_name; \ - make E2E_TARGET_POD_NAME=$$pod_name e2e/readreplica - make k8s/vald/delete + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e/readreplica + $(MAKE) k8s/vald/delete $(MAKE) minikube/delete + +.PHONY: 
e2e/actions/run/stream/crud/skip +## run GitHub Actions E2E test (Stream CRUD with SkipExistsCheck = true) +e2e/actions/run/stream/crud/skip: \ + hack/benchmark/assets/dataset/$(E2E_DATASET_NAME) \ + k3d/restart + kubectl wait -n kube-system --for=condition=Available deployment/metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + sleep 2 + kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + $(MAKE) k8s/vald/deploy \ + HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-lb.yaml + sleep 3 + kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + kubectl wait --for=condition=ContainersReady pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + kubectl get pods + pod_name=$$(kubectl get pods --selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ + echo $$pod_name; \ + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e/skip + $(MAKE) k8s/vald/delete + $(MAKE) k3d/delete + + diff --git a/Makefile.d/functions.mk b/Makefile.d/functions.mk index 1e97c315d8..fa357d142d 100644 --- a/Makefile.d/functions.mk +++ b/Makefile.d/functions.mk @@ -42,7 +42,7 @@ define profile-web endef define go-lint - golangci-lint run --config .golangci.yml --fix + golangci-lint run --config .golangci.yaml --fix endef define go-vet diff --git a/Makefile.d/tools.mk b/Makefile.d/tools.mk index 3502f2e879..c9968e5d75 100644 --- a/Makefile.d/tools.mk +++ b/Makefile.d/tools.mk @@ -89,7 +89,34 @@ textlint/ci/install: cspell/install: $(NPM_GLOBAL_PREFIX)/bin/cspell $(NPM_GLOBAL_PREFIX)/bin/cspell: - npm install -g cspell@latest + npm install -g cspell@latest \ + @cspell/dict-cpp \ + @cspell/dict-docker \ + @cspell/dict-en_us \ + @cspell/dict-fullstack \ + @cspell/dict-git \ + @cspell/dict-golang \ + @cspell/dict-k8s \ + @cspell/dict-makefile \ + @cspell/dict-markdown \ + @cspell/dict-npm \ + @cspell/dict-public-licenses \ + @cspell/dict-rust \ + @cspell/dict-shell + cspell link add @cspell/dict-cpp + cspell link add @cspell/dict-docker + cspell link add @cspell/dict-en_us + cspell link add @cspell/dict-fullstack + cspell link add @cspell/dict-git + cspell link add @cspell/dict-golang + cspell link add @cspell/dict-k8s + cspell link add @cspell/dict-makefile + cspell link add @cspell/dict-markdown + cspell link add @cspell/dict-npm + cspell link add @cspell/dict-public-licenses + cspell link add @cspell/dict-rust + cspell link add @cspell/dict-shell + .PHONY: buf/install buf/install: $(BINDIR)/buf diff --git a/apis/docs/v1/docs.md b/apis/docs/v1/docs.md index 1c600735b2..6961d81288 100644 --- a/apis/docs/v1/docs.md +++ b/apis/docs/v1/docs.md @@ -96,6 +96,7 @@ - [Update.MultiRequest](#payload-v1-Update-MultiRequest) - [Update.ObjectRequest](#payload-v1-Update-ObjectRequest) - [Update.Request](#payload-v1-Update-Request) + - [Update.TimestampRequest](#payload-v1-Update-TimestampRequest) - [Upsert](#payload-v1-Upsert) - [Upsert.Config](#payload-v1-Upsert-Config) - [Upsert.MultiObjectRequest](#payload-v1-Upsert-MultiObjectRequest) @@ -1118,6 +1119,18 @@ Represent the update request. | vector | [Object.Vector](#payload-v1-Object-Vector) | | The vector to be updated. | | config | [Update.Config](#payload-v1-Update-Config) | | The configuration of the update request. | + + +### Update.TimestampRequest + +Represent a vector meta data. 
+ +| Field | Type | Label | Description | +| --------- | ----------------- | ----- | ------------------------------------------------- | +| id | [string](#string) | | The vector ID. | +| timestamp | [int64](#int64) | | timestamp represents when this vector inserted. | +| force | [bool](#bool) | | force represents forcefully update the timestamp. | + ### Upsert @@ -1730,11 +1743,12 @@ Search service provides ways to search indexed vectors. Update service provides ways to update indexed vectors. -| Method Name | Request Type | Response Type | Description | -| ------------ | ------------------------------------------------------------------ | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| Update | [.payload.v1.Update.Request](#payload-v1-Update-Request) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update an indexed vector. | -| StreamUpdate | [.payload.v1.Update.Request](#payload-v1-Update-Request) stream | [.payload.v1.Object.StreamLocation](#payload-v1-Object-StreamLocation) stream | A method to update multiple indexed vectors by bidirectional streaming. | -| MultiUpdate | [.payload.v1.Update.MultiRequest](#payload-v1-Update-MultiRequest) | [.payload.v1.Object.Locations](#payload-v1-Object-Locations) | A method to update multiple indexed vectors in a single request. | +| Method Name | Request Type | Response Type | Description | +| --------------- | -------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| Update | [.payload.v1.Update.Request](#payload-v1-Update-Request) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update an indexed vector. | +| StreamUpdate | [.payload.v1.Update.Request](#payload-v1-Update-Request) stream | [.payload.v1.Object.StreamLocation](#payload-v1-Object-StreamLocation) stream | A method to update multiple indexed vectors by bidirectional streaming. | +| MultiUpdate | [.payload.v1.Update.MultiRequest](#payload-v1-Update-MultiRequest) | [.payload.v1.Object.Locations](#payload-v1-Object-Locations) | A method to update multiple indexed vectors in a single request. | +| UpdateTimestamp | [.payload.v1.Update.TimestampRequest](#payload-v1-Update-TimestampRequest) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update timestamp an indexed vector. | diff --git a/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go b/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go index f60d8f002f..3e7fb5da06 100644 --- a/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go +++ b/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go @@ -36,7 +36,7 @@ const _ = grpc.SupportPackageIsVersion7 // SidecarClient is the client API for Sidecar service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
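For reference, a minimal usage sketch of the new UpdateTimestamp RPC (not part of this patch): it assumes the generated Go clients under github.com/vdaas/vald/apis/grpc/v1/vald and github.com/vdaas/vald/apis/grpc/v1/payload, an LB gateway reachable at localhost:8081 over an insecure connection, and Unix-nanosecond timestamps; the address, vector ID, and timestamp resolution are placeholders rather than values taken from this change.

package main

import (
	"context"
	"log"
	"time"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Connect to the LB gateway (the address is a placeholder for this sketch).
	conn, err := grpc.Dial("localhost:8081",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := vald.NewUpdateClient(conn)

	// Update only the stored timestamp of an already indexed vector,
	// without re-sending the vector itself.
	loc, err := client.UpdateTimestamp(context.Background(), &payload.Update_TimestampRequest{
		Id:        "sample-vector-id",    // ID of an indexed vector (placeholder)
		Timestamp: time.Now().UnixNano(), // new timestamp; nanosecond resolution is an assumption
		Force:     false,                 // set true to overwrite the timestamp unconditionally
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updated vector location: uuid=%s", loc.GetUuid())
}

As with Update, the call returns an Object.Location describing where the affected vector is stored; only the unary form is added here, so batched or streaming timestamp updates would have to be issued as repeated unary calls.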
-type SidecarClient interface{} +type SidecarClient any type sidecarClient struct { cc grpc.ClientConnInterface diff --git a/apis/grpc/v1/payload/payload.pb.go b/apis/grpc/v1/payload/payload.pb.go index 3208a095e8..d1ddb70db9 100644 --- a/apis/grpc/v1/payload/payload.pb.go +++ b/apis/grpc/v1/payload/payload.pb.go @@ -1941,6 +1941,73 @@ func (x *Update_MultiObjectRequest) GetRequests() []*Update_ObjectRequest { return nil } +// Represent a vector meta data. +type Update_TimestampRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The vector ID. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // timestamp represents when this vector inserted. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // force represents forcefully update the timestamp. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` +} + +func (x *Update_TimestampRequest) Reset() { + *x = Update_TimestampRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_v1_payload_payload_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Update_TimestampRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Update_TimestampRequest) ProtoMessage() {} + +func (x *Update_TimestampRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_payload_payload_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Update_TimestampRequest.ProtoReflect.Descriptor instead. +func (*Update_TimestampRequest) Descriptor() ([]byte, []int) { + return file_v1_payload_payload_proto_rawDescGZIP(), []int{3, 4} +} + +func (x *Update_TimestampRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Update_TimestampRequest) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *Update_TimestampRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + // Represent the update configuration. type Update_Config struct { state protoimpl.MessageState @@ -1961,7 +2028,7 @@ type Update_Config struct { func (x *Update_Config) Reset() { *x = Update_Config{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[34] + mi := &file_v1_payload_payload_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1974,7 +2041,7 @@ func (x *Update_Config) String() string { func (*Update_Config) ProtoMessage() {} func (x *Update_Config) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[34] + mi := &file_v1_payload_payload_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1987,7 +2054,7 @@ func (x *Update_Config) ProtoReflect() protoreflect.Message { // Deprecated: Use Update_Config.ProtoReflect.Descriptor instead. 
func (*Update_Config) Descriptor() ([]byte, []int) { - return file_v1_payload_payload_proto_rawDescGZIP(), []int{3, 4} + return file_v1_payload_payload_proto_rawDescGZIP(), []int{3, 5} } func (x *Update_Config) GetSkipStrictExistCheck() bool { @@ -2033,7 +2100,7 @@ type Upsert_Request struct { func (x *Upsert_Request) Reset() { *x = Upsert_Request{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[35] + mi := &file_v1_payload_payload_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2046,7 +2113,7 @@ func (x *Upsert_Request) String() string { func (*Upsert_Request) ProtoMessage() {} func (x *Upsert_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[35] + mi := &file_v1_payload_payload_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2089,7 +2156,7 @@ type Upsert_MultiRequest struct { func (x *Upsert_MultiRequest) Reset() { *x = Upsert_MultiRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[36] + mi := &file_v1_payload_payload_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2102,7 +2169,7 @@ func (x *Upsert_MultiRequest) String() string { func (*Upsert_MultiRequest) ProtoMessage() {} func (x *Upsert_MultiRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[36] + mi := &file_v1_payload_payload_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2142,7 +2209,7 @@ type Upsert_ObjectRequest struct { func (x *Upsert_ObjectRequest) Reset() { *x = Upsert_ObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[37] + mi := &file_v1_payload_payload_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2155,7 +2222,7 @@ func (x *Upsert_ObjectRequest) String() string { func (*Upsert_ObjectRequest) ProtoMessage() {} func (x *Upsert_ObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[37] + mi := &file_v1_payload_payload_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2205,7 +2272,7 @@ type Upsert_MultiObjectRequest struct { func (x *Upsert_MultiObjectRequest) Reset() { *x = Upsert_MultiObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[38] + mi := &file_v1_payload_payload_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2218,7 +2285,7 @@ func (x *Upsert_MultiObjectRequest) String() string { func (*Upsert_MultiObjectRequest) ProtoMessage() {} func (x *Upsert_MultiObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[38] + mi := &file_v1_payload_payload_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2261,7 +2328,7 @@ type Upsert_Config struct { func (x *Upsert_Config) Reset() { *x = Upsert_Config{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[39] + mi := &file_v1_payload_payload_proto_msgTypes[40] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2274,7 +2341,7 @@ func (x *Upsert_Config) String() string { func (*Upsert_Config) ProtoMessage() {} func (x *Upsert_Config) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[39] + mi := &file_v1_payload_payload_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2333,7 +2400,7 @@ type Remove_Request struct { func (x *Remove_Request) Reset() { *x = Remove_Request{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[40] + mi := &file_v1_payload_payload_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2346,7 +2413,7 @@ func (x *Remove_Request) String() string { func (*Remove_Request) ProtoMessage() {} func (x *Remove_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[40] + mi := &file_v1_payload_payload_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2389,7 +2456,7 @@ type Remove_MultiRequest struct { func (x *Remove_MultiRequest) Reset() { *x = Remove_MultiRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[41] + mi := &file_v1_payload_payload_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2402,7 +2469,7 @@ func (x *Remove_MultiRequest) String() string { func (*Remove_MultiRequest) ProtoMessage() {} func (x *Remove_MultiRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[41] + mi := &file_v1_payload_payload_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2439,7 +2506,7 @@ type Remove_TimestampRequest struct { func (x *Remove_TimestampRequest) Reset() { *x = Remove_TimestampRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[42] + mi := &file_v1_payload_payload_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2452,7 +2519,7 @@ func (x *Remove_TimestampRequest) String() string { func (*Remove_TimestampRequest) ProtoMessage() {} func (x *Remove_TimestampRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[42] + mi := &file_v1_payload_payload_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2490,7 +2557,7 @@ type Remove_Timestamp struct { func (x *Remove_Timestamp) Reset() { *x = Remove_Timestamp{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[43] + mi := &file_v1_payload_payload_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2503,7 +2570,7 @@ func (x *Remove_Timestamp) String() string { func (*Remove_Timestamp) ProtoMessage() {} func (x *Remove_Timestamp) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[43] + mi := &file_v1_payload_payload_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2548,7 +2615,7 @@ type Remove_Config struct { func (x *Remove_Config) Reset() { *x = 
Remove_Config{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[44] + mi := &file_v1_payload_payload_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2561,7 +2628,7 @@ func (x *Remove_Config) String() string { func (*Remove_Config) ProtoMessage() {} func (x *Remove_Config) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[44] + mi := &file_v1_payload_payload_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2600,7 +2667,7 @@ type Flush_Request struct { func (x *Flush_Request) Reset() { *x = Flush_Request{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[45] + mi := &file_v1_payload_payload_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2613,7 +2680,7 @@ func (x *Flush_Request) String() string { func (*Flush_Request) ProtoMessage() {} func (x *Flush_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[45] + mi := &file_v1_payload_payload_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2644,7 +2711,7 @@ type Object_VectorRequest struct { func (x *Object_VectorRequest) Reset() { *x = Object_VectorRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[46] + mi := &file_v1_payload_payload_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2657,7 +2724,7 @@ func (x *Object_VectorRequest) String() string { func (*Object_VectorRequest) ProtoMessage() {} func (x *Object_VectorRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[46] + mi := &file_v1_payload_payload_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2702,7 +2769,7 @@ type Object_Distance struct { func (x *Object_Distance) Reset() { *x = Object_Distance{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[47] + mi := &file_v1_payload_payload_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2715,7 +2782,7 @@ func (x *Object_Distance) String() string { func (*Object_Distance) ProtoMessage() {} func (x *Object_Distance) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[47] + mi := &file_v1_payload_payload_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2761,7 +2828,7 @@ type Object_StreamDistance struct { func (x *Object_StreamDistance) Reset() { *x = Object_StreamDistance{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[48] + mi := &file_v1_payload_payload_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2774,7 +2841,7 @@ func (x *Object_StreamDistance) String() string { func (*Object_StreamDistance) ProtoMessage() {} func (x *Object_StreamDistance) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[48] + mi := &file_v1_payload_payload_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2841,7 +2908,7 @@ type Object_ID struct { func (x *Object_ID) Reset() { *x = Object_ID{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[49] + mi := &file_v1_payload_payload_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2854,7 +2921,7 @@ func (x *Object_ID) String() string { func (*Object_ID) ProtoMessage() {} func (x *Object_ID) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[49] + mi := &file_v1_payload_payload_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2889,7 +2956,7 @@ type Object_IDs struct { func (x *Object_IDs) Reset() { *x = Object_IDs{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[50] + mi := &file_v1_payload_payload_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2902,7 +2969,7 @@ func (x *Object_IDs) String() string { func (*Object_IDs) ProtoMessage() {} func (x *Object_IDs) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[50] + mi := &file_v1_payload_payload_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2942,7 +3009,7 @@ type Object_Vector struct { func (x *Object_Vector) Reset() { *x = Object_Vector{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[51] + mi := &file_v1_payload_payload_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2955,7 +3022,7 @@ func (x *Object_Vector) String() string { func (*Object_Vector) ProtoMessage() {} func (x *Object_Vector) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[51] + mi := &file_v1_payload_payload_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3005,7 +3072,7 @@ type Object_TimestampRequest struct { func (x *Object_TimestampRequest) Reset() { *x = Object_TimestampRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[52] + mi := &file_v1_payload_payload_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3018,7 +3085,7 @@ func (x *Object_TimestampRequest) String() string { func (*Object_TimestampRequest) ProtoMessage() {} func (x *Object_TimestampRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[52] + mi := &file_v1_payload_payload_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3056,7 +3123,7 @@ type Object_Timestamp struct { func (x *Object_Timestamp) Reset() { *x = Object_Timestamp{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[53] + mi := &file_v1_payload_payload_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3069,7 +3136,7 @@ func (x *Object_Timestamp) String() string { func (*Object_Timestamp) ProtoMessage() {} func (x *Object_Timestamp) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[53] + mi := 
&file_v1_payload_payload_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3111,7 +3178,7 @@ type Object_Vectors struct { func (x *Object_Vectors) Reset() { *x = Object_Vectors{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[54] + mi := &file_v1_payload_payload_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3124,7 +3191,7 @@ func (x *Object_Vectors) String() string { func (*Object_Vectors) ProtoMessage() {} func (x *Object_Vectors) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[54] + mi := &file_v1_payload_payload_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3163,7 +3230,7 @@ type Object_StreamVector struct { func (x *Object_StreamVector) Reset() { *x = Object_StreamVector{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[55] + mi := &file_v1_payload_payload_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3176,7 +3243,7 @@ func (x *Object_StreamVector) String() string { func (*Object_StreamVector) ProtoMessage() {} func (x *Object_StreamVector) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[55] + mi := &file_v1_payload_payload_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3246,7 +3313,7 @@ type Object_ReshapeVector struct { func (x *Object_ReshapeVector) Reset() { *x = Object_ReshapeVector{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[56] + mi := &file_v1_payload_payload_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3259,7 +3326,7 @@ func (x *Object_ReshapeVector) String() string { func (*Object_ReshapeVector) ProtoMessage() {} func (x *Object_ReshapeVector) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[56] + mi := &file_v1_payload_payload_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3304,7 +3371,7 @@ type Object_Blob struct { func (x *Object_Blob) Reset() { *x = Object_Blob{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[57] + mi := &file_v1_payload_payload_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3317,7 +3384,7 @@ func (x *Object_Blob) String() string { func (*Object_Blob) ProtoMessage() {} func (x *Object_Blob) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[57] + mi := &file_v1_payload_payload_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3363,7 +3430,7 @@ type Object_StreamBlob struct { func (x *Object_StreamBlob) Reset() { *x = Object_StreamBlob{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[58] + mi := &file_v1_payload_payload_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3376,7 +3443,7 @@ func (x *Object_StreamBlob) String() string { func (*Object_StreamBlob) 
ProtoMessage() {} func (x *Object_StreamBlob) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[58] + mi := &file_v1_payload_payload_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3448,7 +3515,7 @@ type Object_Location struct { func (x *Object_Location) Reset() { *x = Object_Location{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[59] + mi := &file_v1_payload_payload_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3461,7 +3528,7 @@ func (x *Object_Location) String() string { func (*Object_Location) ProtoMessage() {} func (x *Object_Location) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[59] + mi := &file_v1_payload_payload_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3514,7 +3581,7 @@ type Object_StreamLocation struct { func (x *Object_StreamLocation) Reset() { *x = Object_StreamLocation{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[60] + mi := &file_v1_payload_payload_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3527,7 +3594,7 @@ func (x *Object_StreamLocation) String() string { func (*Object_StreamLocation) ProtoMessage() {} func (x *Object_StreamLocation) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[60] + mi := &file_v1_payload_payload_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3594,7 +3661,7 @@ type Object_Locations struct { func (x *Object_Locations) Reset() { *x = Object_Locations{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[61] + mi := &file_v1_payload_payload_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3607,7 +3674,7 @@ func (x *Object_Locations) String() string { func (*Object_Locations) ProtoMessage() {} func (x *Object_Locations) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[61] + mi := &file_v1_payload_payload_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3640,7 +3707,7 @@ type Object_List struct { func (x *Object_List) Reset() { *x = Object_List{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[62] + mi := &file_v1_payload_payload_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3653,7 +3720,7 @@ func (x *Object_List) String() string { func (*Object_List) ProtoMessage() {} func (x *Object_List) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[62] + mi := &file_v1_payload_payload_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3678,7 +3745,7 @@ type Object_List_Request struct { func (x *Object_List_Request) Reset() { *x = Object_List_Request{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[63] + mi := &file_v1_payload_payload_proto_msgTypes[64] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3691,7 +3758,7 @@ func (x *Object_List_Request) String() string { func (*Object_List_Request) ProtoMessage() {} func (x *Object_List_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[63] + mi := &file_v1_payload_payload_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3722,7 +3789,7 @@ type Object_List_Response struct { func (x *Object_List_Response) Reset() { *x = Object_List_Response{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[64] + mi := &file_v1_payload_payload_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3735,7 +3802,7 @@ func (x *Object_List_Response) String() string { func (*Object_List_Response) ProtoMessage() {} func (x *Object_List_Response) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[64] + mi := &file_v1_payload_payload_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3803,7 +3870,7 @@ type Control_CreateIndexRequest struct { func (x *Control_CreateIndexRequest) Reset() { *x = Control_CreateIndexRequest{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[65] + mi := &file_v1_payload_payload_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3816,7 +3883,7 @@ func (x *Control_CreateIndexRequest) String() string { func (*Control_CreateIndexRequest) ProtoMessage() {} func (x *Control_CreateIndexRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[65] + mi := &file_v1_payload_payload_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3856,7 +3923,7 @@ type Discoverer_Request struct { func (x *Discoverer_Request) Reset() { *x = Discoverer_Request{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[66] + mi := &file_v1_payload_payload_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3869,7 +3936,7 @@ func (x *Discoverer_Request) String() string { func (*Discoverer_Request) ProtoMessage() {} func (x *Discoverer_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[66] + mi := &file_v1_payload_payload_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3916,7 +3983,7 @@ type Info_Index struct { func (x *Info_Index) Reset() { *x = Info_Index{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[67] + mi := &file_v1_payload_payload_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3929,7 +3996,7 @@ func (x *Info_Index) String() string { func (*Info_Index) ProtoMessage() {} func (x *Info_Index) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[67] + mi := &file_v1_payload_payload_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3970,7 +4037,7 @@ type Info_Pod struct { func (x 
*Info_Pod) Reset() { *x = Info_Pod{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[68] + mi := &file_v1_payload_payload_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3983,7 +4050,7 @@ func (x *Info_Pod) String() string { func (*Info_Pod) ProtoMessage() {} func (x *Info_Pod) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[68] + mi := &file_v1_payload_payload_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4071,7 +4138,7 @@ type Info_Node struct { func (x *Info_Node) Reset() { *x = Info_Node{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[69] + mi := &file_v1_payload_payload_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4084,7 +4151,7 @@ func (x *Info_Node) String() string { func (*Info_Node) ProtoMessage() {} func (x *Info_Node) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[69] + mi := &file_v1_payload_payload_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4165,7 +4232,7 @@ type Info_Service struct { func (x *Info_Service) Reset() { *x = Info_Service{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[70] + mi := &file_v1_payload_payload_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4178,7 +4245,7 @@ func (x *Info_Service) String() string { func (*Info_Service) ProtoMessage() {} func (x *Info_Service) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[70] + mi := &file_v1_payload_payload_proto_msgTypes[71] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4251,7 +4318,7 @@ type Info_ServicePort struct { func (x *Info_ServicePort) Reset() { *x = Info_ServicePort{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[71] + mi := &file_v1_payload_payload_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4264,7 +4331,7 @@ func (x *Info_ServicePort) String() string { func (*Info_ServicePort) ProtoMessage() {} func (x *Info_ServicePort) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[71] + mi := &file_v1_payload_payload_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4306,7 +4373,7 @@ type Info_Labels struct { func (x *Info_Labels) Reset() { *x = Info_Labels{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[72] + mi := &file_v1_payload_payload_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4319,7 +4386,7 @@ func (x *Info_Labels) String() string { func (*Info_Labels) ProtoMessage() {} func (x *Info_Labels) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[72] + mi := &file_v1_payload_payload_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4354,7 +4421,7 @@ type Info_Annotations struct { func (x 
*Info_Annotations) Reset() { *x = Info_Annotations{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[73] + mi := &file_v1_payload_payload_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4367,7 +4434,7 @@ func (x *Info_Annotations) String() string { func (*Info_Annotations) ProtoMessage() {} func (x *Info_Annotations) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[73] + mi := &file_v1_payload_payload_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4407,7 +4474,7 @@ type Info_CPU struct { func (x *Info_CPU) Reset() { *x = Info_CPU{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[74] + mi := &file_v1_payload_payload_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4420,7 +4487,7 @@ func (x *Info_CPU) String() string { func (*Info_CPU) ProtoMessage() {} func (x *Info_CPU) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[74] + mi := &file_v1_payload_payload_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4474,7 +4541,7 @@ type Info_Memory struct { func (x *Info_Memory) Reset() { *x = Info_Memory{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[75] + mi := &file_v1_payload_payload_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4487,7 +4554,7 @@ func (x *Info_Memory) String() string { func (*Info_Memory) ProtoMessage() {} func (x *Info_Memory) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[75] + mi := &file_v1_payload_payload_proto_msgTypes[76] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4537,7 +4604,7 @@ type Info_Pods struct { func (x *Info_Pods) Reset() { *x = Info_Pods{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[76] + mi := &file_v1_payload_payload_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4550,7 +4617,7 @@ func (x *Info_Pods) String() string { func (*Info_Pods) ProtoMessage() {} func (x *Info_Pods) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[76] + mi := &file_v1_payload_payload_proto_msgTypes[77] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4586,7 +4653,7 @@ type Info_Nodes struct { func (x *Info_Nodes) Reset() { *x = Info_Nodes{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[77] + mi := &file_v1_payload_payload_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4599,7 +4666,7 @@ func (x *Info_Nodes) String() string { func (*Info_Nodes) ProtoMessage() {} func (x *Info_Nodes) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[77] + mi := &file_v1_payload_payload_proto_msgTypes[78] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4635,7 +4702,7 @@ type Info_Services struct { func (x *Info_Services) Reset() { *x = 
Info_Services{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[78] + mi := &file_v1_payload_payload_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4648,7 +4715,7 @@ func (x *Info_Services) String() string { func (*Info_Services) ProtoMessage() {} func (x *Info_Services) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[78] + mi := &file_v1_payload_payload_proto_msgTypes[79] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4683,7 +4750,7 @@ type Info_IPs struct { func (x *Info_IPs) Reset() { *x = Info_IPs{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[79] + mi := &file_v1_payload_payload_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4696,7 +4763,7 @@ func (x *Info_IPs) String() string { func (*Info_IPs) ProtoMessage() {} func (x *Info_IPs) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[79] + mi := &file_v1_payload_payload_proto_msgTypes[80] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4738,7 +4805,7 @@ type Info_Index_Count struct { func (x *Info_Index_Count) Reset() { *x = Info_Index_Count{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[80] + mi := &file_v1_payload_payload_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4751,7 +4818,7 @@ func (x *Info_Index_Count) String() string { func (*Info_Index_Count) ProtoMessage() {} func (x *Info_Index_Count) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[80] + mi := &file_v1_payload_payload_proto_msgTypes[81] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4812,7 +4879,7 @@ type Info_Index_Detail struct { func (x *Info_Index_Detail) Reset() { *x = Info_Index_Detail{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[81] + mi := &file_v1_payload_payload_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4825,7 +4892,7 @@ func (x *Info_Index_Detail) String() string { func (*Info_Index_Detail) ProtoMessage() {} func (x *Info_Index_Detail) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[81] + mi := &file_v1_payload_payload_proto_msgTypes[82] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4872,7 +4939,7 @@ type Info_Index_UUID struct { func (x *Info_Index_UUID) Reset() { *x = Info_Index_UUID{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[82] + mi := &file_v1_payload_payload_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4885,7 +4952,7 @@ func (x *Info_Index_UUID) String() string { func (*Info_Index_UUID) ProtoMessage() {} func (x *Info_Index_UUID) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[82] + mi := &file_v1_payload_payload_proto_msgTypes[83] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4945,7 +5012,7 @@ type 
Info_Index_Statistics struct { func (x *Info_Index_Statistics) Reset() { *x = Info_Index_Statistics{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[83] + mi := &file_v1_payload_payload_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4958,7 +5025,7 @@ func (x *Info_Index_Statistics) String() string { func (*Info_Index_Statistics) ProtoMessage() {} func (x *Info_Index_Statistics) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[83] + mi := &file_v1_payload_payload_proto_msgTypes[84] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5218,7 +5285,7 @@ type Info_Index_StatisticsDetail struct { func (x *Info_Index_StatisticsDetail) Reset() { *x = Info_Index_StatisticsDetail{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[84] + mi := &file_v1_payload_payload_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5231,7 +5298,7 @@ func (x *Info_Index_StatisticsDetail) String() string { func (*Info_Index_StatisticsDetail) ProtoMessage() {} func (x *Info_Index_StatisticsDetail) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[84] + mi := &file_v1_payload_payload_proto_msgTypes[85] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5299,7 +5366,7 @@ type Info_Index_Property struct { func (x *Info_Index_Property) Reset() { *x = Info_Index_Property{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[85] + mi := &file_v1_payload_payload_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5312,7 +5379,7 @@ func (x *Info_Index_Property) String() string { func (*Info_Index_Property) ProtoMessage() {} func (x *Info_Index_Property) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[85] + mi := &file_v1_payload_payload_proto_msgTypes[86] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5578,7 +5645,7 @@ type Info_Index_PropertyDetail struct { func (x *Info_Index_PropertyDetail) Reset() { *x = Info_Index_PropertyDetail{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[86] + mi := &file_v1_payload_payload_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5591,7 +5658,7 @@ func (x *Info_Index_PropertyDetail) String() string { func (*Info_Index_PropertyDetail) ProtoMessage() {} func (x *Info_Index_PropertyDetail) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[86] + mi := &file_v1_payload_payload_proto_msgTypes[87] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5626,7 +5693,7 @@ type Info_Index_UUID_Committed struct { func (x *Info_Index_UUID_Committed) Reset() { *x = Info_Index_UUID_Committed{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[88] + mi := &file_v1_payload_payload_proto_msgTypes[89] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5639,7 +5706,7 @@ func (x *Info_Index_UUID_Committed) String() string { func (*Info_Index_UUID_Committed) 
ProtoMessage() {} func (x *Info_Index_UUID_Committed) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[88] + mi := &file_v1_payload_payload_proto_msgTypes[89] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5674,7 +5741,7 @@ type Info_Index_UUID_Uncommitted struct { func (x *Info_Index_UUID_Uncommitted) Reset() { *x = Info_Index_UUID_Uncommitted{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[89] + mi := &file_v1_payload_payload_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5687,7 +5754,7 @@ func (x *Info_Index_UUID_Uncommitted) String() string { func (*Info_Index_UUID_Uncommitted) ProtoMessage() {} func (x *Info_Index_UUID_Uncommitted) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[89] + mi := &file_v1_payload_payload_proto_msgTypes[90] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5725,7 +5792,7 @@ type Mirror_Target struct { func (x *Mirror_Target) Reset() { *x = Mirror_Target{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[94] + mi := &file_v1_payload_payload_proto_msgTypes[95] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5738,7 +5805,7 @@ func (x *Mirror_Target) String() string { func (*Mirror_Target) ProtoMessage() {} func (x *Mirror_Target) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[94] + mi := &file_v1_payload_payload_proto_msgTypes[95] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5781,7 +5848,7 @@ type Mirror_Targets struct { func (x *Mirror_Targets) Reset() { *x = Mirror_Targets{} if protoimpl.UnsafeEnabled { - mi := &file_v1_payload_payload_proto_msgTypes[95] + mi := &file_v1_payload_payload_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5794,7 +5861,7 @@ func (x *Mirror_Targets) String() string { func (*Mirror_Targets) ProtoMessage() {} func (x *Mirror_Targets) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[95] + mi := &file_v1_payload_payload_proto_msgTypes[96] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6002,6 +6069,7 @@ var file_v1_payload_payload_proto_rawDesc = []byte{ 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, +<<<<<<< HEAD 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x69, 0x73, @@ -6549,6 +6617,599 @@ var file_v1_payload_payload_proto_rawDesc = []byte{ 0x70, 0x69, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0xa2, 0x02, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +======= + 0x09, 0x74, 
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xfe, 0x05, 0x0a, 0x06, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x1a, 0x79, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3b, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x08, 0xba, 0x48, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x1a, 0x46, 0x0a, 0x0c, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x36, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0xae, 0x01, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x42, + 0x6c, 0x6f, 0x62, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, + 0x0a, 0x0a, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0a, 0x76, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x1a, 0x52, 0x0a, 0x12, 0x4d, 0x75, 0x6c, + 0x74, 0x69, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3c, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x5f, 0x0a, + 0x10, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, + 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x1a, 0xca, + 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, 0x69, + 0x70, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, 0x70, + 0x53, 0x74, 0x72, 0x69, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x12, 0x33, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x64, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x9d, 0x05, 0x0a, 0x06, + 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x1a, 0x79, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x08, 0xba, 0x48, + 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x1a, 0x46, 0x0a, 0x0c, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x36, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0xae, 0x01, 0x0a, 0x0d, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x31, 0x0a, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x39, 0x0a, 0x0a, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0a, + 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 
0x1a, 0x52, 0x0a, 0x12, 0x4d, 0x75, + 0x6c, 0x74, 0x69, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3c, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0xca, + 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, 0x69, + 0x70, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, 0x63, + 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, 0x70, + 0x53, 0x74, 0x72, 0x69, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x12, 0x33, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x65, 0x64, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x91, 0x04, 0x0a, 0x06, + 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x1a, 0x63, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x25, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x49, 0x44, 0x52, 0x02, 0x69, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x46, 0x0a, 0x0c, 0x4d, + 0x75, 0x6c, 0x74, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x1a, 0x50, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x73, 0x1a, 0xa8, 0x01, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x41, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x6f, 0x72, 0x22, 0x3a, 0x0a, 0x08, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x12, 0x06, 0x0a, 0x02, 0x45, 0x71, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x4e, 0x65, 0x10, 0x01, + 0x12, 0x06, 0x0a, 0x02, 0x47, 0x65, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x74, 0x10, 0x03, + 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x65, 0x10, 0x04, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x74, 0x10, 0x05, + 0x1a, 0x5d, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, + 0x69, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, + 0x70, 0x53, 0x74, 0x72, 0x69, 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, + 0x12, 0x0a, 0x05, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0xb1, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x75, + 0x0a, 0x0d, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2f, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, + 0x49, 0x44, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x33, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x36, 0x0a, 0x08, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x08, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x84, 0x01, + 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x12, 0x39, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x48, + 0x00, 0x52, 0x08, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, + 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 
0x61, 0x64, 0x1a, 0x1d, 0x0a, 0x02, 0x49, 0x44, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x02, 0x69, 0x64, 0x1a, 0x17, 0x0a, 0x03, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x1a, 0x61, 0x0a, 0x06, + 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x20, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x02, 0x42, + 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1a, + 0x43, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x49, 0x44, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, + 0x52, 0x02, 0x69, 0x64, 0x1a, 0x42, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, + 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1a, 0x3e, 0x0a, 0x07, 0x56, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x07, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x7c, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2c, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x3d, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, + 0x65, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x68, 0x61, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x17, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, + 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 
0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x74, + 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x2d, 0x0a, 0x04, + 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x42, + 0x6c, 0x6f, 0x62, 0x48, 0x00, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x2c, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, + 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x44, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x0e, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, + 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x1a, 0x46, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, + 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x04, 0x4c, 0x69, + 0x73, 0x74, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x76, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2c, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x1a, 0x3a, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 
0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x70, 0x6f, 0x6f, 0x6c, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x07, 0xba, 0x48, 0x04, + 0x2a, 0x02, 0x28, 0x00, 0x52, 0x08, 0x70, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66, + 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x65, 0x72, 0x1a, 0x58, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0xc2, 0x2b, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x1a, + 0x80, 0x20, 0x0a, 0x05, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x75, 0x0a, 0x05, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x6e, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0b, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x61, 0x76, 0x69, + 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, + 0x1a, 0xdf, 0x01, 0x0a, 0x06, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x41, 0x0a, 0x06, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x76, 0x65, + 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, + 0x69, 0x76, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x57, 0x0a, 0x0b, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x1f, 0x0a, 0x09, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x1a, 0x21, 0x0a, 0x0b, 0x55, + 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 
0x75, 0x75, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x1a, 0x9d, + 0x0d, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x4f, 0x75, + 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x4f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x35, 0x0a, 0x17, + 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, + 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, + 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, + 0x72, 0x65, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x13, 0x6d, 0x69, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, + 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x69, 0x6e, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, + 0x72, 0x65, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, 0x69, 0x6e, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x64, 0x65, + 0x67, 0x72, 0x65, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x64, 0x65, 0x5f, 0x6f, 0x75, 0x74, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x6f, + 0x64, 0x65, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x3a, 0x0a, 0x1a, 0x6e, + 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, + 0x5f, 0x31, 0x30, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x46, 0x6f, 0x72, + 0x31, 0x30, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x23, 0x6e, 0x6f, 0x64, 0x65, 0x73, + 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, + 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x1f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, + 0x65, 0x64, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x44, 0x69, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, + 0x6f, 0x66, 
0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x12, 0x40, 0x0a, 0x1d, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, + 0x64, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x65, 0x64, 0x67, 0x65, + 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, + 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x45, 0x64, 0x67, + 0x65, 0x73, 0x12, 0x46, 0x0a, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, + 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1c, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x6f, + 0x75, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x4f, 0x66, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x13, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x73, 0x69, 0x7a, 0x65, 0x4f, 0x66, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4e, 0x0a, 0x24, + 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x6f, 0x72, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x20, 0x73, 0x69, 0x7a, 0x65, + 0x4f, 0x66, 0x52, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, + 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, + 0x67, 0x72, 0x65, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x12, 0x76, 0x61, 0x72, 0x69, + 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x32, + 0x0a, 0x15, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, + 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 
0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x13, 0x76, + 0x61, 0x72, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, + 0x65, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x17, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x6d, 0x65, + 0x61, 0x6e, 0x45, 0x64, 0x67, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x3f, 0x0a, 0x1d, + 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x31, 0x30, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x18, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x61, 0x6e, 0x45, 0x64, 0x67, 0x65, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x46, 0x6f, 0x72, 0x31, 0x30, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x4b, 0x0a, + 0x23, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x64, + 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x31, 0x30, 0x5f, 0x65, + 0x64, 0x67, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1e, 0x6d, 0x65, 0x61, 0x6e, + 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x46, 0x6f, 0x72, 0x31, 0x30, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x1d, 0x6d, 0x65, + 0x61, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x65, 0x64, 0x67, + 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x18, 0x6d, 0x65, 0x61, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x45, + 0x64, 0x67, 0x65, 0x73, 0x50, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x31, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x0a, 0x63, 0x31, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x35, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0a, 0x63, 0x35, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x39, 0x35, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1d, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x63, 0x39, 0x35, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, + 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x39, 0x39, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, + 0x72, 0x65, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x63, 0x39, 0x39, 0x4f, 0x75, + 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x64, 0x65, 0x67, + 0x72, 0x65, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x03, 0x52, + 0x0d, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, + 0x0a, 0x13, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x20, 0x20, 0x03, 0x28, 0x04, 0x52, 0x12, 0x6f, 0x75, 0x74, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, + 0x2d, 0x0a, 0x12, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x68, 0x69, 0x73, 0x74, + 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x21, 0x20, 0x03, 0x28, 0x04, 0x52, 0x11, 0x69, 0x6e, 0x64, + 0x65, 0x67, 0x72, 0x65, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x1a, 0xc1, + 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x44, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x12, 0x4e, 0x0a, 0x07, 0x64, 0x65, 0x74, 
0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x44, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x1a, 0x5d, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0xaf, 0x0c, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, + 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x09, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, + 0x10, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x50, + 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x67, + 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x18, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, + 0x70, 0x61, 0x74, 0x68, 0x41, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x37, 0x0a, 0x18, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, + 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x67, 0x72, 0x61, 0x70, 0x68, 0x53, + 0x68, 0x61, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x35, 0x0a, 0x17, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 
0x28, 0x05, + 0x52, 0x14, 0x74, 0x72, 0x65, 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x66, + 0x65, 0x74, 0x63, 0x68, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0c, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, + 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x61, + 0x72, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x6d, + 0x61, 0x67, 0x6e, 0x69, 0x74, 0x75, 0x64, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, + 0x6d, 0x61, 0x78, 0x4d, 0x61, 0x67, 0x6e, 0x69, 0x74, 0x75, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x22, + 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, 0x65, 0x69, 0x67, 0x68, 0x62, 0x6f, 0x72, 0x73, 0x5f, 0x66, + 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x72, 0x64, + 0x65, 0x72, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05, 0x52, 0x1d, 0x6e, 0x4f, 0x66, 0x4e, 0x65, 0x69, + 0x67, 0x68, 0x62, 0x6f, 0x72, 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, + 0x6f, 0x6e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x70, 0x73, 0x69, 0x6c, + 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x12, 0x20, 0x01, 0x28, 0x02, 0x52, 0x18, 0x65, 0x70, + 0x73, 0x69, 0x6c, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, + 0x6e, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x14, + 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, + 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x74, 0x72, 0x75, 0x6e, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, + 0x33, 0x0a, 0x16, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, + 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x13, 0x65, 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x14, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x5f, 
0x66, 0x6f, 0x72, 0x5f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x18, 0x16, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x11, 0x65, 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x53, + 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, 0x3e, 0x0a, 0x1c, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x01, 0x28, 0x05, 0x52, 0x18, 0x65, 0x64, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x64, 0x69, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x65, 0x66, 0x66, 0x69, + 0x63, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1a, 0x69, 0x6e, 0x73, + 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x64, 0x69, 0x75, 0x73, 0x43, 0x6f, 0x65, 0x66, + 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x1b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x18, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x35, 0x0a, 0x17, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, + 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x14, 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x61, + 0x70, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x61, 0x73, 0x65, + 0x18, 0x1e, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, + 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x61, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x64, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x64, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x45, 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x52, 0x61, 0x74, 0x65, + 0x12, 0x28, 0x0a, 0x10, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x18, 0x20, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x75, + 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x18, 0x21, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x45, 0x64, 0x67, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x64, 0x67, 0x65, + 0x18, 0x22, 0x20, 0x01, 0x28, 0x05, 
0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, + 0x45, 0x64, 0x67, 0x65, 0x1a, 0xbb, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x4c, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, + 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0xef, 0x01, 0x0a, 0x03, 0x50, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, + 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x78, 0x01, 0x52, 0x02, 0x69, 0x70, + 0x12, 0x26, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, + 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x6e, 0x6f, 0x64, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, + 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0xe8, 0x01, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x03, 0x63, + 0x70, 0x75, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 
0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, + 0x63, 0x70, 0x75, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x64, 0x73, 0x52, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x1a, + 0x82, 0x02, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x12, 0x1f, + 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x73, 0x12, + 0x32, 0x0a, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x12, 0x2f, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x35, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x1a, 0x80, 0x01, 0x0a, 0x06, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x9e, + 0x01, 0x0a, 0x0b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, + 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 
0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x4b, 0x0a, 0x03, 0x43, 0x50, 0x55, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x4e, 0x0a, 0x06, + 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x3a, 0x0a, 0x04, + 0x50, 0x6f, 0x64, 0x73, 0x12, 0x32, 0x0a, 0x04, 0x70, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x64, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, + 0x08, 0x01, 0x52, 0x04, 0x70, 0x6f, 0x64, 0x73, 0x1a, 0x3e, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x12, 0x35, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x66, 0x6f, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x01, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x1a, 0x4a, 0x0a, 0x08, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x1a, 0x15, 0x0a, 0x03, 0x49, 0x50, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x22, 0x7a, 0x0a, 0x06, 0x4d, + 0x69, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x30, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x1a, 0x3e, 0x0a, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 
0x12, 0x33, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x42, 0x64, 0x0a, 0x1d, 0x6f, 0x72, 0x67, 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2e, 0x76, 0x61, + 0x6c, 0x64, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x42, 0x0b, 0x56, 0x61, 0x6c, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x01, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x64, 0x61, + 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0xa2, 0x02, 0x07, 0x50, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -6565,7 +7226,7 @@ func file_v1_payload_payload_proto_rawDescGZIP() []byte { var ( file_v1_payload_payload_proto_enumTypes = make([]protoimpl.EnumInfo, 2) - file_v1_payload_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 96) + file_v1_payload_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 97) file_v1_payload_payload_proto_goTypes = []any{ (Search_AggregationAlgorithm)(0), // 0: payload.v1.Search.AggregationAlgorithm (Remove_Timestamp_Operator)(0), // 1: payload.v1.Remove.Timestamp.Operator @@ -6603,160 +7264,161 @@ var ( (*Update_MultiRequest)(nil), // 33: payload.v1.Update.MultiRequest (*Update_ObjectRequest)(nil), // 34: payload.v1.Update.ObjectRequest (*Update_MultiObjectRequest)(nil), // 35: payload.v1.Update.MultiObjectRequest - (*Update_Config)(nil), // 36: payload.v1.Update.Config - (*Upsert_Request)(nil), // 37: payload.v1.Upsert.Request - (*Upsert_MultiRequest)(nil), // 38: payload.v1.Upsert.MultiRequest - (*Upsert_ObjectRequest)(nil), // 39: payload.v1.Upsert.ObjectRequest - (*Upsert_MultiObjectRequest)(nil), // 40: payload.v1.Upsert.MultiObjectRequest - (*Upsert_Config)(nil), // 41: payload.v1.Upsert.Config - (*Remove_Request)(nil), // 42: payload.v1.Remove.Request - (*Remove_MultiRequest)(nil), // 43: payload.v1.Remove.MultiRequest - (*Remove_TimestampRequest)(nil), // 44: payload.v1.Remove.TimestampRequest - (*Remove_Timestamp)(nil), // 45: payload.v1.Remove.Timestamp - (*Remove_Config)(nil), // 46: payload.v1.Remove.Config - (*Flush_Request)(nil), // 47: payload.v1.Flush.Request - (*Object_VectorRequest)(nil), // 48: payload.v1.Object.VectorRequest - (*Object_Distance)(nil), // 49: payload.v1.Object.Distance - (*Object_StreamDistance)(nil), // 50: payload.v1.Object.StreamDistance - (*Object_ID)(nil), // 51: payload.v1.Object.ID - (*Object_IDs)(nil), // 52: payload.v1.Object.IDs - (*Object_Vector)(nil), // 53: payload.v1.Object.Vector - (*Object_TimestampRequest)(nil), // 54: payload.v1.Object.TimestampRequest - (*Object_Timestamp)(nil), // 55: payload.v1.Object.Timestamp - (*Object_Vectors)(nil), // 56: payload.v1.Object.Vectors - (*Object_StreamVector)(nil), // 57: payload.v1.Object.StreamVector - (*Object_ReshapeVector)(nil), // 58: payload.v1.Object.ReshapeVector - (*Object_Blob)(nil), // 59: payload.v1.Object.Blob - (*Object_StreamBlob)(nil), // 60: payload.v1.Object.StreamBlob - (*Object_Location)(nil), // 61: payload.v1.Object.Location - 
(*Object_StreamLocation)(nil), // 62: payload.v1.Object.StreamLocation - (*Object_Locations)(nil), // 63: payload.v1.Object.Locations - (*Object_List)(nil), // 64: payload.v1.Object.List - (*Object_List_Request)(nil), // 65: payload.v1.Object.List.Request - (*Object_List_Response)(nil), // 66: payload.v1.Object.List.Response - (*Control_CreateIndexRequest)(nil), // 67: payload.v1.Control.CreateIndexRequest - (*Discoverer_Request)(nil), // 68: payload.v1.Discoverer.Request - (*Info_Index)(nil), // 69: payload.v1.Info.Index - (*Info_Pod)(nil), // 70: payload.v1.Info.Pod - (*Info_Node)(nil), // 71: payload.v1.Info.Node - (*Info_Service)(nil), // 72: payload.v1.Info.Service - (*Info_ServicePort)(nil), // 73: payload.v1.Info.ServicePort - (*Info_Labels)(nil), // 74: payload.v1.Info.Labels - (*Info_Annotations)(nil), // 75: payload.v1.Info.Annotations - (*Info_CPU)(nil), // 76: payload.v1.Info.CPU - (*Info_Memory)(nil), // 77: payload.v1.Info.Memory - (*Info_Pods)(nil), // 78: payload.v1.Info.Pods - (*Info_Nodes)(nil), // 79: payload.v1.Info.Nodes - (*Info_Services)(nil), // 80: payload.v1.Info.Services - (*Info_IPs)(nil), // 81: payload.v1.Info.IPs - (*Info_Index_Count)(nil), // 82: payload.v1.Info.Index.Count - (*Info_Index_Detail)(nil), // 83: payload.v1.Info.Index.Detail - (*Info_Index_UUID)(nil), // 84: payload.v1.Info.Index.UUID - (*Info_Index_Statistics)(nil), // 85: payload.v1.Info.Index.Statistics - (*Info_Index_StatisticsDetail)(nil), // 86: payload.v1.Info.Index.StatisticsDetail - (*Info_Index_Property)(nil), // 87: payload.v1.Info.Index.Property - (*Info_Index_PropertyDetail)(nil), // 88: payload.v1.Info.Index.PropertyDetail - nil, // 89: payload.v1.Info.Index.Detail.CountsEntry - (*Info_Index_UUID_Committed)(nil), // 90: payload.v1.Info.Index.UUID.Committed - (*Info_Index_UUID_Uncommitted)(nil), // 91: payload.v1.Info.Index.UUID.Uncommitted - nil, // 92: payload.v1.Info.Index.StatisticsDetail.DetailsEntry - nil, // 93: payload.v1.Info.Index.PropertyDetail.DetailsEntry - nil, // 94: payload.v1.Info.Labels.LabelsEntry - nil, // 95: payload.v1.Info.Annotations.AnnotationsEntry - (*Mirror_Target)(nil), // 96: payload.v1.Mirror.Target - (*Mirror_Targets)(nil), // 97: payload.v1.Mirror.Targets - (*wrapperspb.FloatValue)(nil), // 98: google.protobuf.FloatValue - (*status.Status)(nil), // 99: google.rpc.Status + (*Update_TimestampRequest)(nil), // 36: payload.v1.Update.TimestampRequest + (*Update_Config)(nil), // 37: payload.v1.Update.Config + (*Upsert_Request)(nil), // 38: payload.v1.Upsert.Request + (*Upsert_MultiRequest)(nil), // 39: payload.v1.Upsert.MultiRequest + (*Upsert_ObjectRequest)(nil), // 40: payload.v1.Upsert.ObjectRequest + (*Upsert_MultiObjectRequest)(nil), // 41: payload.v1.Upsert.MultiObjectRequest + (*Upsert_Config)(nil), // 42: payload.v1.Upsert.Config + (*Remove_Request)(nil), // 43: payload.v1.Remove.Request + (*Remove_MultiRequest)(nil), // 44: payload.v1.Remove.MultiRequest + (*Remove_TimestampRequest)(nil), // 45: payload.v1.Remove.TimestampRequest + (*Remove_Timestamp)(nil), // 46: payload.v1.Remove.Timestamp + (*Remove_Config)(nil), // 47: payload.v1.Remove.Config + (*Flush_Request)(nil), // 48: payload.v1.Flush.Request + (*Object_VectorRequest)(nil), // 49: payload.v1.Object.VectorRequest + (*Object_Distance)(nil), // 50: payload.v1.Object.Distance + (*Object_StreamDistance)(nil), // 51: payload.v1.Object.StreamDistance + (*Object_ID)(nil), // 52: payload.v1.Object.ID + (*Object_IDs)(nil), // 53: payload.v1.Object.IDs + (*Object_Vector)(nil), // 54: 
payload.v1.Object.Vector + (*Object_TimestampRequest)(nil), // 55: payload.v1.Object.TimestampRequest + (*Object_Timestamp)(nil), // 56: payload.v1.Object.Timestamp + (*Object_Vectors)(nil), // 57: payload.v1.Object.Vectors + (*Object_StreamVector)(nil), // 58: payload.v1.Object.StreamVector + (*Object_ReshapeVector)(nil), // 59: payload.v1.Object.ReshapeVector + (*Object_Blob)(nil), // 60: payload.v1.Object.Blob + (*Object_StreamBlob)(nil), // 61: payload.v1.Object.StreamBlob + (*Object_Location)(nil), // 62: payload.v1.Object.Location + (*Object_StreamLocation)(nil), // 63: payload.v1.Object.StreamLocation + (*Object_Locations)(nil), // 64: payload.v1.Object.Locations + (*Object_List)(nil), // 65: payload.v1.Object.List + (*Object_List_Request)(nil), // 66: payload.v1.Object.List.Request + (*Object_List_Response)(nil), // 67: payload.v1.Object.List.Response + (*Control_CreateIndexRequest)(nil), // 68: payload.v1.Control.CreateIndexRequest + (*Discoverer_Request)(nil), // 69: payload.v1.Discoverer.Request + (*Info_Index)(nil), // 70: payload.v1.Info.Index + (*Info_Pod)(nil), // 71: payload.v1.Info.Pod + (*Info_Node)(nil), // 72: payload.v1.Info.Node + (*Info_Service)(nil), // 73: payload.v1.Info.Service + (*Info_ServicePort)(nil), // 74: payload.v1.Info.ServicePort + (*Info_Labels)(nil), // 75: payload.v1.Info.Labels + (*Info_Annotations)(nil), // 76: payload.v1.Info.Annotations + (*Info_CPU)(nil), // 77: payload.v1.Info.CPU + (*Info_Memory)(nil), // 78: payload.v1.Info.Memory + (*Info_Pods)(nil), // 79: payload.v1.Info.Pods + (*Info_Nodes)(nil), // 80: payload.v1.Info.Nodes + (*Info_Services)(nil), // 81: payload.v1.Info.Services + (*Info_IPs)(nil), // 82: payload.v1.Info.IPs + (*Info_Index_Count)(nil), // 83: payload.v1.Info.Index.Count + (*Info_Index_Detail)(nil), // 84: payload.v1.Info.Index.Detail + (*Info_Index_UUID)(nil), // 85: payload.v1.Info.Index.UUID + (*Info_Index_Statistics)(nil), // 86: payload.v1.Info.Index.Statistics + (*Info_Index_StatisticsDetail)(nil), // 87: payload.v1.Info.Index.StatisticsDetail + (*Info_Index_Property)(nil), // 88: payload.v1.Info.Index.Property + (*Info_Index_PropertyDetail)(nil), // 89: payload.v1.Info.Index.PropertyDetail + nil, // 90: payload.v1.Info.Index.Detail.CountsEntry + (*Info_Index_UUID_Committed)(nil), // 91: payload.v1.Info.Index.UUID.Committed + (*Info_Index_UUID_Uncommitted)(nil), // 92: payload.v1.Info.Index.UUID.Uncommitted + nil, // 93: payload.v1.Info.Index.StatisticsDetail.DetailsEntry + nil, // 94: payload.v1.Info.Index.PropertyDetail.DetailsEntry + nil, // 95: payload.v1.Info.Labels.LabelsEntry + nil, // 96: payload.v1.Info.Annotations.AnnotationsEntry + (*Mirror_Target)(nil), // 97: payload.v1.Mirror.Target + (*Mirror_Targets)(nil), // 98: payload.v1.Mirror.Targets + (*wrapperspb.FloatValue)(nil), // 99: google.protobuf.FloatValue + (*status.Status)(nil), // 100: google.rpc.Status } ) var file_v1_payload_payload_proto_depIdxs = []int32{ - 21, // 0: payload.v1.Search.Request.config:type_name -> payload.v1.Search.Config - 15, // 1: payload.v1.Search.MultiRequest.requests:type_name -> payload.v1.Search.Request - 21, // 2: payload.v1.Search.IDRequest.config:type_name -> payload.v1.Search.Config - 17, // 3: payload.v1.Search.MultiIDRequest.requests:type_name -> payload.v1.Search.IDRequest - 21, // 4: payload.v1.Search.ObjectRequest.config:type_name -> payload.v1.Search.Config - 25, // 5: payload.v1.Search.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target - 19, // 6: 
payload.v1.Search.MultiObjectRequest.requests:type_name -> payload.v1.Search.ObjectRequest - 26, // 7: payload.v1.Search.Config.ingress_filters:type_name -> payload.v1.Filter.Config - 26, // 8: payload.v1.Search.Config.egress_filters:type_name -> payload.v1.Filter.Config - 0, // 9: payload.v1.Search.Config.aggregation_algorithm:type_name -> payload.v1.Search.AggregationAlgorithm - 98, // 10: payload.v1.Search.Config.ratio:type_name -> google.protobuf.FloatValue - 49, // 11: payload.v1.Search.Response.results:type_name -> payload.v1.Object.Distance - 22, // 12: payload.v1.Search.Responses.responses:type_name -> payload.v1.Search.Response - 22, // 13: payload.v1.Search.StreamResponse.response:type_name -> payload.v1.Search.Response - 99, // 14: payload.v1.Search.StreamResponse.status:type_name -> google.rpc.Status - 25, // 15: payload.v1.Filter.Config.targets:type_name -> payload.v1.Filter.Target - 53, // 16: payload.v1.Insert.Request.vector:type_name -> payload.v1.Object.Vector - 31, // 17: payload.v1.Insert.Request.config:type_name -> payload.v1.Insert.Config - 27, // 18: payload.v1.Insert.MultiRequest.requests:type_name -> payload.v1.Insert.Request - 59, // 19: payload.v1.Insert.ObjectRequest.object:type_name -> payload.v1.Object.Blob - 31, // 20: payload.v1.Insert.ObjectRequest.config:type_name -> payload.v1.Insert.Config - 25, // 21: payload.v1.Insert.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target - 29, // 22: payload.v1.Insert.MultiObjectRequest.requests:type_name -> payload.v1.Insert.ObjectRequest - 26, // 23: payload.v1.Insert.Config.filters:type_name -> payload.v1.Filter.Config - 53, // 24: payload.v1.Update.Request.vector:type_name -> payload.v1.Object.Vector - 36, // 25: payload.v1.Update.Request.config:type_name -> payload.v1.Update.Config - 32, // 26: payload.v1.Update.MultiRequest.requests:type_name -> payload.v1.Update.Request - 59, // 27: payload.v1.Update.ObjectRequest.object:type_name -> payload.v1.Object.Blob - 36, // 28: payload.v1.Update.ObjectRequest.config:type_name -> payload.v1.Update.Config - 25, // 29: payload.v1.Update.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target - 34, // 30: payload.v1.Update.MultiObjectRequest.requests:type_name -> payload.v1.Update.ObjectRequest - 26, // 31: payload.v1.Update.Config.filters:type_name -> payload.v1.Filter.Config - 53, // 32: payload.v1.Upsert.Request.vector:type_name -> payload.v1.Object.Vector - 41, // 33: payload.v1.Upsert.Request.config:type_name -> payload.v1.Upsert.Config - 37, // 34: payload.v1.Upsert.MultiRequest.requests:type_name -> payload.v1.Upsert.Request - 59, // 35: payload.v1.Upsert.ObjectRequest.object:type_name -> payload.v1.Object.Blob - 41, // 36: payload.v1.Upsert.ObjectRequest.config:type_name -> payload.v1.Upsert.Config - 25, // 37: payload.v1.Upsert.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target - 39, // 38: payload.v1.Upsert.MultiObjectRequest.requests:type_name -> payload.v1.Upsert.ObjectRequest - 26, // 39: payload.v1.Upsert.Config.filters:type_name -> payload.v1.Filter.Config - 51, // 40: payload.v1.Remove.Request.id:type_name -> payload.v1.Object.ID - 46, // 41: payload.v1.Remove.Request.config:type_name -> payload.v1.Remove.Config - 42, // 42: payload.v1.Remove.MultiRequest.requests:type_name -> payload.v1.Remove.Request - 45, // 43: payload.v1.Remove.TimestampRequest.timestamps:type_name -> payload.v1.Remove.Timestamp - 1, // 44: payload.v1.Remove.Timestamp.operator:type_name -> payload.v1.Remove.Timestamp.Operator - 51, // 45: 
payload.v1.Object.VectorRequest.id:type_name -> payload.v1.Object.ID - 26, // 46: payload.v1.Object.VectorRequest.filters:type_name -> payload.v1.Filter.Config - 49, // 47: payload.v1.Object.StreamDistance.distance:type_name -> payload.v1.Object.Distance - 99, // 48: payload.v1.Object.StreamDistance.status:type_name -> google.rpc.Status - 51, // 49: payload.v1.Object.TimestampRequest.id:type_name -> payload.v1.Object.ID - 53, // 50: payload.v1.Object.Vectors.vectors:type_name -> payload.v1.Object.Vector - 53, // 51: payload.v1.Object.StreamVector.vector:type_name -> payload.v1.Object.Vector - 99, // 52: payload.v1.Object.StreamVector.status:type_name -> google.rpc.Status - 59, // 53: payload.v1.Object.StreamBlob.blob:type_name -> payload.v1.Object.Blob - 99, // 54: payload.v1.Object.StreamBlob.status:type_name -> google.rpc.Status - 61, // 55: payload.v1.Object.StreamLocation.location:type_name -> payload.v1.Object.Location - 99, // 56: payload.v1.Object.StreamLocation.status:type_name -> google.rpc.Status - 61, // 57: payload.v1.Object.Locations.locations:type_name -> payload.v1.Object.Location - 53, // 58: payload.v1.Object.List.Response.vector:type_name -> payload.v1.Object.Vector - 99, // 59: payload.v1.Object.List.Response.status:type_name -> google.rpc.Status - 76, // 60: payload.v1.Info.Pod.cpu:type_name -> payload.v1.Info.CPU - 77, // 61: payload.v1.Info.Pod.memory:type_name -> payload.v1.Info.Memory - 71, // 62: payload.v1.Info.Pod.node:type_name -> payload.v1.Info.Node - 76, // 63: payload.v1.Info.Node.cpu:type_name -> payload.v1.Info.CPU - 77, // 64: payload.v1.Info.Node.memory:type_name -> payload.v1.Info.Memory - 78, // 65: payload.v1.Info.Node.Pods:type_name -> payload.v1.Info.Pods - 73, // 66: payload.v1.Info.Service.ports:type_name -> payload.v1.Info.ServicePort - 74, // 67: payload.v1.Info.Service.labels:type_name -> payload.v1.Info.Labels - 75, // 68: payload.v1.Info.Service.annotations:type_name -> payload.v1.Info.Annotations - 94, // 69: payload.v1.Info.Labels.labels:type_name -> payload.v1.Info.Labels.LabelsEntry - 95, // 70: payload.v1.Info.Annotations.annotations:type_name -> payload.v1.Info.Annotations.AnnotationsEntry - 70, // 71: payload.v1.Info.Pods.pods:type_name -> payload.v1.Info.Pod - 71, // 72: payload.v1.Info.Nodes.nodes:type_name -> payload.v1.Info.Node - 72, // 73: payload.v1.Info.Services.services:type_name -> payload.v1.Info.Service - 89, // 74: payload.v1.Info.Index.Detail.counts:type_name -> payload.v1.Info.Index.Detail.CountsEntry - 92, // 75: payload.v1.Info.Index.StatisticsDetail.details:type_name -> payload.v1.Info.Index.StatisticsDetail.DetailsEntry - 93, // 76: payload.v1.Info.Index.PropertyDetail.details:type_name -> payload.v1.Info.Index.PropertyDetail.DetailsEntry - 82, // 77: payload.v1.Info.Index.Detail.CountsEntry.value:type_name -> payload.v1.Info.Index.Count - 85, // 78: payload.v1.Info.Index.StatisticsDetail.DetailsEntry.value:type_name -> payload.v1.Info.Index.Statistics - 87, // 79: payload.v1.Info.Index.PropertyDetail.DetailsEntry.value:type_name -> payload.v1.Info.Index.Property - 96, // 80: payload.v1.Mirror.Targets.targets:type_name -> payload.v1.Mirror.Target - 81, // [81:81] is the sub-list for method output_type - 81, // [81:81] is the sub-list for method input_type - 81, // [81:81] is the sub-list for extension type_name - 81, // [81:81] is the sub-list for extension extendee - 0, // [0:81] is the sub-list for field type_name + 21, // 0: payload.v1.Search.Request.config:type_name -> payload.v1.Search.Config + 15, // 1: 
payload.v1.Search.MultiRequest.requests:type_name -> payload.v1.Search.Request + 21, // 2: payload.v1.Search.IDRequest.config:type_name -> payload.v1.Search.Config + 17, // 3: payload.v1.Search.MultiIDRequest.requests:type_name -> payload.v1.Search.IDRequest + 21, // 4: payload.v1.Search.ObjectRequest.config:type_name -> payload.v1.Search.Config + 25, // 5: payload.v1.Search.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target + 19, // 6: payload.v1.Search.MultiObjectRequest.requests:type_name -> payload.v1.Search.ObjectRequest + 26, // 7: payload.v1.Search.Config.ingress_filters:type_name -> payload.v1.Filter.Config + 26, // 8: payload.v1.Search.Config.egress_filters:type_name -> payload.v1.Filter.Config + 0, // 9: payload.v1.Search.Config.aggregation_algorithm:type_name -> payload.v1.Search.AggregationAlgorithm + 99, // 10: payload.v1.Search.Config.ratio:type_name -> google.protobuf.FloatValue + 50, // 11: payload.v1.Search.Response.results:type_name -> payload.v1.Object.Distance + 22, // 12: payload.v1.Search.Responses.responses:type_name -> payload.v1.Search.Response + 22, // 13: payload.v1.Search.StreamResponse.response:type_name -> payload.v1.Search.Response + 100, // 14: payload.v1.Search.StreamResponse.status:type_name -> google.rpc.Status + 25, // 15: payload.v1.Filter.Config.targets:type_name -> payload.v1.Filter.Target + 54, // 16: payload.v1.Insert.Request.vector:type_name -> payload.v1.Object.Vector + 31, // 17: payload.v1.Insert.Request.config:type_name -> payload.v1.Insert.Config + 27, // 18: payload.v1.Insert.MultiRequest.requests:type_name -> payload.v1.Insert.Request + 60, // 19: payload.v1.Insert.ObjectRequest.object:type_name -> payload.v1.Object.Blob + 31, // 20: payload.v1.Insert.ObjectRequest.config:type_name -> payload.v1.Insert.Config + 25, // 21: payload.v1.Insert.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target + 29, // 22: payload.v1.Insert.MultiObjectRequest.requests:type_name -> payload.v1.Insert.ObjectRequest + 26, // 23: payload.v1.Insert.Config.filters:type_name -> payload.v1.Filter.Config + 54, // 24: payload.v1.Update.Request.vector:type_name -> payload.v1.Object.Vector + 37, // 25: payload.v1.Update.Request.config:type_name -> payload.v1.Update.Config + 32, // 26: payload.v1.Update.MultiRequest.requests:type_name -> payload.v1.Update.Request + 60, // 27: payload.v1.Update.ObjectRequest.object:type_name -> payload.v1.Object.Blob + 37, // 28: payload.v1.Update.ObjectRequest.config:type_name -> payload.v1.Update.Config + 25, // 29: payload.v1.Update.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target + 34, // 30: payload.v1.Update.MultiObjectRequest.requests:type_name -> payload.v1.Update.ObjectRequest + 26, // 31: payload.v1.Update.Config.filters:type_name -> payload.v1.Filter.Config + 54, // 32: payload.v1.Upsert.Request.vector:type_name -> payload.v1.Object.Vector + 42, // 33: payload.v1.Upsert.Request.config:type_name -> payload.v1.Upsert.Config + 38, // 34: payload.v1.Upsert.MultiRequest.requests:type_name -> payload.v1.Upsert.Request + 60, // 35: payload.v1.Upsert.ObjectRequest.object:type_name -> payload.v1.Object.Blob + 42, // 36: payload.v1.Upsert.ObjectRequest.config:type_name -> payload.v1.Upsert.Config + 25, // 37: payload.v1.Upsert.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target + 40, // 38: payload.v1.Upsert.MultiObjectRequest.requests:type_name -> payload.v1.Upsert.ObjectRequest + 26, // 39: payload.v1.Upsert.Config.filters:type_name -> payload.v1.Filter.Config + 52, // 40: 
payload.v1.Remove.Request.id:type_name -> payload.v1.Object.ID + 47, // 41: payload.v1.Remove.Request.config:type_name -> payload.v1.Remove.Config + 43, // 42: payload.v1.Remove.MultiRequest.requests:type_name -> payload.v1.Remove.Request + 46, // 43: payload.v1.Remove.TimestampRequest.timestamps:type_name -> payload.v1.Remove.Timestamp + 1, // 44: payload.v1.Remove.Timestamp.operator:type_name -> payload.v1.Remove.Timestamp.Operator + 52, // 45: payload.v1.Object.VectorRequest.id:type_name -> payload.v1.Object.ID + 26, // 46: payload.v1.Object.VectorRequest.filters:type_name -> payload.v1.Filter.Config + 50, // 47: payload.v1.Object.StreamDistance.distance:type_name -> payload.v1.Object.Distance + 100, // 48: payload.v1.Object.StreamDistance.status:type_name -> google.rpc.Status + 52, // 49: payload.v1.Object.TimestampRequest.id:type_name -> payload.v1.Object.ID + 54, // 50: payload.v1.Object.Vectors.vectors:type_name -> payload.v1.Object.Vector + 54, // 51: payload.v1.Object.StreamVector.vector:type_name -> payload.v1.Object.Vector + 100, // 52: payload.v1.Object.StreamVector.status:type_name -> google.rpc.Status + 60, // 53: payload.v1.Object.StreamBlob.blob:type_name -> payload.v1.Object.Blob + 100, // 54: payload.v1.Object.StreamBlob.status:type_name -> google.rpc.Status + 62, // 55: payload.v1.Object.StreamLocation.location:type_name -> payload.v1.Object.Location + 100, // 56: payload.v1.Object.StreamLocation.status:type_name -> google.rpc.Status + 62, // 57: payload.v1.Object.Locations.locations:type_name -> payload.v1.Object.Location + 54, // 58: payload.v1.Object.List.Response.vector:type_name -> payload.v1.Object.Vector + 100, // 59: payload.v1.Object.List.Response.status:type_name -> google.rpc.Status + 77, // 60: payload.v1.Info.Pod.cpu:type_name -> payload.v1.Info.CPU + 78, // 61: payload.v1.Info.Pod.memory:type_name -> payload.v1.Info.Memory + 72, // 62: payload.v1.Info.Pod.node:type_name -> payload.v1.Info.Node + 77, // 63: payload.v1.Info.Node.cpu:type_name -> payload.v1.Info.CPU + 78, // 64: payload.v1.Info.Node.memory:type_name -> payload.v1.Info.Memory + 79, // 65: payload.v1.Info.Node.Pods:type_name -> payload.v1.Info.Pods + 74, // 66: payload.v1.Info.Service.ports:type_name -> payload.v1.Info.ServicePort + 75, // 67: payload.v1.Info.Service.labels:type_name -> payload.v1.Info.Labels + 76, // 68: payload.v1.Info.Service.annotations:type_name -> payload.v1.Info.Annotations + 95, // 69: payload.v1.Info.Labels.labels:type_name -> payload.v1.Info.Labels.LabelsEntry + 96, // 70: payload.v1.Info.Annotations.annotations:type_name -> payload.v1.Info.Annotations.AnnotationsEntry + 71, // 71: payload.v1.Info.Pods.pods:type_name -> payload.v1.Info.Pod + 72, // 72: payload.v1.Info.Nodes.nodes:type_name -> payload.v1.Info.Node + 73, // 73: payload.v1.Info.Services.services:type_name -> payload.v1.Info.Service + 90, // 74: payload.v1.Info.Index.Detail.counts:type_name -> payload.v1.Info.Index.Detail.CountsEntry + 93, // 75: payload.v1.Info.Index.StatisticsDetail.details:type_name -> payload.v1.Info.Index.StatisticsDetail.DetailsEntry + 94, // 76: payload.v1.Info.Index.PropertyDetail.details:type_name -> payload.v1.Info.Index.PropertyDetail.DetailsEntry + 83, // 77: payload.v1.Info.Index.Detail.CountsEntry.value:type_name -> payload.v1.Info.Index.Count + 86, // 78: payload.v1.Info.Index.StatisticsDetail.DetailsEntry.value:type_name -> payload.v1.Info.Index.Statistics + 88, // 79: payload.v1.Info.Index.PropertyDetail.DetailsEntry.value:type_name -> payload.v1.Info.Index.Property + 97, 
// 80: payload.v1.Mirror.Targets.targets:type_name -> payload.v1.Mirror.Target + 81, // [81:81] is the sub-list for method output_type + 81, // [81:81] is the sub-list for method input_type + 81, // [81:81] is the sub-list for extension type_name + 81, // [81:81] is the sub-list for extension extendee + 0, // [0:81] is the sub-list for field type_name } func init() { file_v1_payload_payload_proto_init() } @@ -7174,7 +7836,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*Update_Config); i { + switch v := v.(*Update_TimestampRequest); i { case 0: return &v.state case 1: @@ -7186,7 +7848,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*Upsert_Request); i { + switch v := v.(*Update_Config); i { case 0: return &v.state case 1: @@ -7198,7 +7860,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*Upsert_MultiRequest); i { + switch v := v.(*Upsert_Request); i { case 0: return &v.state case 1: @@ -7210,7 +7872,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*Upsert_ObjectRequest); i { + switch v := v.(*Upsert_MultiRequest); i { case 0: return &v.state case 1: @@ -7222,7 +7884,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*Upsert_MultiObjectRequest); i { + switch v := v.(*Upsert_ObjectRequest); i { case 0: return &v.state case 1: @@ -7234,7 +7896,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[39].Exporter = func(v any, i int) any { - switch v := v.(*Upsert_Config); i { + switch v := v.(*Upsert_MultiObjectRequest); i { case 0: return &v.state case 1: @@ -7246,7 +7908,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[40].Exporter = func(v any, i int) any { - switch v := v.(*Remove_Request); i { + switch v := v.(*Upsert_Config); i { case 0: return &v.state case 1: @@ -7258,7 +7920,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[41].Exporter = func(v any, i int) any { - switch v := v.(*Remove_MultiRequest); i { + switch v := v.(*Remove_Request); i { case 0: return &v.state case 1: @@ -7270,7 +7932,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[42].Exporter = func(v any, i int) any { - switch v := v.(*Remove_TimestampRequest); i { + switch v := v.(*Remove_MultiRequest); i { case 0: return &v.state case 1: @@ -7282,7 +7944,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[43].Exporter = func(v any, i int) any { - switch v := v.(*Remove_Timestamp); i { + switch v := v.(*Remove_TimestampRequest); i { case 0: return &v.state case 1: @@ -7294,7 +7956,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[44].Exporter = func(v any, i int) any { - switch v := v.(*Remove_Config); i { + switch v := v.(*Remove_Timestamp); i { case 0: return &v.state case 1: @@ -7306,7 +7968,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[45].Exporter = func(v any, i int) any { - switch v := v.(*Flush_Request); i { + switch v := 
v.(*Remove_Config); i { case 0: return &v.state case 1: @@ -7318,7 +7980,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[46].Exporter = func(v any, i int) any { - switch v := v.(*Object_VectorRequest); i { + switch v := v.(*Flush_Request); i { case 0: return &v.state case 1: @@ -7330,7 +7992,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[47].Exporter = func(v any, i int) any { - switch v := v.(*Object_Distance); i { + switch v := v.(*Object_VectorRequest); i { case 0: return &v.state case 1: @@ -7342,7 +8004,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[48].Exporter = func(v any, i int) any { - switch v := v.(*Object_StreamDistance); i { + switch v := v.(*Object_Distance); i { case 0: return &v.state case 1: @@ -7354,7 +8016,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[49].Exporter = func(v any, i int) any { - switch v := v.(*Object_ID); i { + switch v := v.(*Object_StreamDistance); i { case 0: return &v.state case 1: @@ -7366,7 +8028,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[50].Exporter = func(v any, i int) any { - switch v := v.(*Object_IDs); i { + switch v := v.(*Object_ID); i { case 0: return &v.state case 1: @@ -7378,7 +8040,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[51].Exporter = func(v any, i int) any { - switch v := v.(*Object_Vector); i { + switch v := v.(*Object_IDs); i { case 0: return &v.state case 1: @@ -7390,7 +8052,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[52].Exporter = func(v any, i int) any { - switch v := v.(*Object_TimestampRequest); i { + switch v := v.(*Object_Vector); i { case 0: return &v.state case 1: @@ -7402,7 +8064,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[53].Exporter = func(v any, i int) any { - switch v := v.(*Object_Timestamp); i { + switch v := v.(*Object_TimestampRequest); i { case 0: return &v.state case 1: @@ -7414,7 +8076,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[54].Exporter = func(v any, i int) any { - switch v := v.(*Object_Vectors); i { + switch v := v.(*Object_Timestamp); i { case 0: return &v.state case 1: @@ -7426,7 +8088,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[55].Exporter = func(v any, i int) any { - switch v := v.(*Object_StreamVector); i { + switch v := v.(*Object_Vectors); i { case 0: return &v.state case 1: @@ -7438,7 +8100,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[56].Exporter = func(v any, i int) any { - switch v := v.(*Object_ReshapeVector); i { + switch v := v.(*Object_StreamVector); i { case 0: return &v.state case 1: @@ -7450,7 +8112,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[57].Exporter = func(v any, i int) any { - switch v := v.(*Object_Blob); i { + switch v := v.(*Object_ReshapeVector); i { case 0: return &v.state case 1: @@ -7462,7 +8124,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[58].Exporter = func(v any, i int) any { - switch v := v.(*Object_StreamBlob); i { + switch v := v.(*Object_Blob); i { case 0: return &v.state case 1: @@ -7474,7 +8136,7 @@ func file_v1_payload_payload_proto_init() { } } 
file_v1_payload_payload_proto_msgTypes[59].Exporter = func(v any, i int) any { - switch v := v.(*Object_Location); i { + switch v := v.(*Object_StreamBlob); i { case 0: return &v.state case 1: @@ -7486,7 +8148,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[60].Exporter = func(v any, i int) any { - switch v := v.(*Object_StreamLocation); i { + switch v := v.(*Object_Location); i { case 0: return &v.state case 1: @@ -7498,7 +8160,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[61].Exporter = func(v any, i int) any { - switch v := v.(*Object_Locations); i { + switch v := v.(*Object_StreamLocation); i { case 0: return &v.state case 1: @@ -7510,7 +8172,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[62].Exporter = func(v any, i int) any { - switch v := v.(*Object_List); i { + switch v := v.(*Object_Locations); i { case 0: return &v.state case 1: @@ -7522,7 +8184,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[63].Exporter = func(v any, i int) any { - switch v := v.(*Object_List_Request); i { + switch v := v.(*Object_List); i { case 0: return &v.state case 1: @@ -7534,7 +8196,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[64].Exporter = func(v any, i int) any { - switch v := v.(*Object_List_Response); i { + switch v := v.(*Object_List_Request); i { case 0: return &v.state case 1: @@ -7546,7 +8208,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[65].Exporter = func(v any, i int) any { - switch v := v.(*Control_CreateIndexRequest); i { + switch v := v.(*Object_List_Response); i { case 0: return &v.state case 1: @@ -7558,7 +8220,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[66].Exporter = func(v any, i int) any { - switch v := v.(*Discoverer_Request); i { + switch v := v.(*Control_CreateIndexRequest); i { case 0: return &v.state case 1: @@ -7570,7 +8232,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[67].Exporter = func(v any, i int) any { - switch v := v.(*Info_Index); i { + switch v := v.(*Discoverer_Request); i { case 0: return &v.state case 1: @@ -7582,7 +8244,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[68].Exporter = func(v any, i int) any { - switch v := v.(*Info_Pod); i { + switch v := v.(*Info_Index); i { case 0: return &v.state case 1: @@ -7594,7 +8256,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[69].Exporter = func(v any, i int) any { - switch v := v.(*Info_Node); i { + switch v := v.(*Info_Pod); i { case 0: return &v.state case 1: @@ -7606,7 +8268,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[70].Exporter = func(v any, i int) any { - switch v := v.(*Info_Service); i { + switch v := v.(*Info_Node); i { case 0: return &v.state case 1: @@ -7618,7 +8280,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[71].Exporter = func(v any, i int) any { - switch v := v.(*Info_ServicePort); i { + switch v := v.(*Info_Service); i { case 0: return &v.state case 1: @@ -7630,7 +8292,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[72].Exporter = func(v any, i int) any { - switch v := v.(*Info_Labels); i { + switch v := 
v.(*Info_ServicePort); i { case 0: return &v.state case 1: @@ -7642,7 +8304,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[73].Exporter = func(v any, i int) any { - switch v := v.(*Info_Annotations); i { + switch v := v.(*Info_Labels); i { case 0: return &v.state case 1: @@ -7654,7 +8316,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[74].Exporter = func(v any, i int) any { - switch v := v.(*Info_CPU); i { + switch v := v.(*Info_Annotations); i { case 0: return &v.state case 1: @@ -7666,7 +8328,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[75].Exporter = func(v any, i int) any { - switch v := v.(*Info_Memory); i { + switch v := v.(*Info_CPU); i { case 0: return &v.state case 1: @@ -7678,7 +8340,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[76].Exporter = func(v any, i int) any { - switch v := v.(*Info_Pods); i { + switch v := v.(*Info_Memory); i { case 0: return &v.state case 1: @@ -7690,7 +8352,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[77].Exporter = func(v any, i int) any { - switch v := v.(*Info_Nodes); i { + switch v := v.(*Info_Pods); i { case 0: return &v.state case 1: @@ -7702,7 +8364,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[78].Exporter = func(v any, i int) any { - switch v := v.(*Info_Services); i { + switch v := v.(*Info_Nodes); i { case 0: return &v.state case 1: @@ -7714,7 +8376,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[79].Exporter = func(v any, i int) any { - switch v := v.(*Info_IPs); i { + switch v := v.(*Info_Services); i { case 0: return &v.state case 1: @@ -7726,7 +8388,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[80].Exporter = func(v any, i int) any { - switch v := v.(*Info_Index_Count); i { + switch v := v.(*Info_IPs); i { case 0: return &v.state case 1: @@ -7738,7 +8400,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[81].Exporter = func(v any, i int) any { - switch v := v.(*Info_Index_Detail); i { + switch v := v.(*Info_Index_Count); i { case 0: return &v.state case 1: @@ -7750,7 +8412,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[82].Exporter = func(v any, i int) any { - switch v := v.(*Info_Index_UUID); i { + switch v := v.(*Info_Index_Detail); i { case 0: return &v.state case 1: @@ -7762,7 +8424,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[83].Exporter = func(v any, i int) any { - switch v := v.(*Info_Index_Statistics); i { + switch v := v.(*Info_Index_UUID); i { case 0: return &v.state case 1: @@ -7774,7 +8436,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[84].Exporter = func(v any, i int) any { - switch v := v.(*Info_Index_StatisticsDetail); i { + switch v := v.(*Info_Index_Statistics); i { case 0: return &v.state case 1: @@ -7786,7 +8448,7 @@ func file_v1_payload_payload_proto_init() { } } file_v1_payload_payload_proto_msgTypes[85].Exporter = func(v any, i int) any { - switch v := v.(*Info_Index_Property); i { + switch v := v.(*Info_Index_StatisticsDetail); i { case 0: return &v.state case 1: @@ -7798,6 +8460,18 @@ func file_v1_payload_payload_proto_init() { } } 
file_v1_payload_payload_proto_msgTypes[86].Exporter = func(v any, i int) any { + switch v := v.(*Info_Index_Property); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_v1_payload_payload_proto_msgTypes[87].Exporter = func(v any, i int) any { switch v := v.(*Info_Index_PropertyDetail); i { case 0: return &v.state @@ -7809,7 +8483,7 @@ func file_v1_payload_payload_proto_init() { return nil } } - file_v1_payload_payload_proto_msgTypes[88].Exporter = func(v any, i int) any { + file_v1_payload_payload_proto_msgTypes[89].Exporter = func(v any, i int) any { switch v := v.(*Info_Index_UUID_Committed); i { case 0: return &v.state @@ -7821,7 +8495,7 @@ func file_v1_payload_payload_proto_init() { return nil } } - file_v1_payload_payload_proto_msgTypes[89].Exporter = func(v any, i int) any { + file_v1_payload_payload_proto_msgTypes[90].Exporter = func(v any, i int) any { switch v := v.(*Info_Index_UUID_Uncommitted); i { case 0: return &v.state @@ -7833,7 +8507,7 @@ func file_v1_payload_payload_proto_init() { return nil } } - file_v1_payload_payload_proto_msgTypes[94].Exporter = func(v any, i int) any { + file_v1_payload_payload_proto_msgTypes[95].Exporter = func(v any, i int) any { switch v := v.(*Mirror_Target); i { case 0: return &v.state @@ -7845,7 +8519,7 @@ func file_v1_payload_payload_proto_init() { return nil } } - file_v1_payload_payload_proto_msgTypes[95].Exporter = func(v any, i int) any { + file_v1_payload_payload_proto_msgTypes[96].Exporter = func(v any, i int) any { switch v := v.(*Mirror_Targets); i { case 0: return &v.state @@ -7862,23 +8536,23 @@ func file_v1_payload_payload_proto_init() { (*Search_StreamResponse_Response)(nil), (*Search_StreamResponse_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[48].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[49].OneofWrappers = []any{ (*Object_StreamDistance_Distance)(nil), (*Object_StreamDistance_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[55].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[56].OneofWrappers = []any{ (*Object_StreamVector_Vector)(nil), (*Object_StreamVector_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[58].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[59].OneofWrappers = []any{ (*Object_StreamBlob_Blob)(nil), (*Object_StreamBlob_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[60].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[61].OneofWrappers = []any{ (*Object_StreamLocation_Location)(nil), (*Object_StreamLocation_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[64].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[65].OneofWrappers = []any{ (*Object_List_Response_Vector)(nil), (*Object_List_Response_Status)(nil), } @@ -7888,7 +8562,7 @@ func file_v1_payload_payload_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_v1_payload_payload_proto_rawDesc, NumEnums: 2, - NumMessages: 96, + NumMessages: 97, NumExtensions: 0, NumServices: 0, }, diff --git a/apis/grpc/v1/payload/payload.pb.json.go b/apis/grpc/v1/payload/payload.pb.json.go index f05c39d277..0c65501773 100644 --- a/apis/grpc/v1/payload/payload.pb.json.go +++ b/apis/grpc/v1/payload/payload.pb.json.go @@ -271,6 +271,16 @@ func (msg *Update_MultiObjectRequest) UnmarshalJSON(b []byte) error { return protojson.UnmarshalOptions{}.Unmarshal(b, msg) } +// MarshalJSON implements json.Marshaler +func (msg *Update_TimestampRequest) 
MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{}.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Update_TimestampRequest) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{}.Unmarshal(b, msg) +} + // MarshalJSON implements json.Marshaler func (msg *Update_Config) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{}.Marshal(msg) diff --git a/apis/grpc/v1/payload/payload_vtproto.pb.go b/apis/grpc/v1/payload/payload_vtproto.pb.go index fc0f412603..206cf579b5 100644 --- a/apis/grpc/v1/payload/payload_vtproto.pb.go +++ b/apis/grpc/v1/payload/payload_vtproto.pb.go @@ -561,6 +561,25 @@ func (m *Update_MultiObjectRequest) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *Update_TimestampRequest) CloneVT() *Update_TimestampRequest { + if m == nil { + return (*Update_TimestampRequest)(nil) + } + r := new(Update_TimestampRequest) + r.Id = m.Id + r.Timestamp = m.Timestamp + r.Force = m.Force + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Update_TimestampRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Update_Config) CloneVT() *Update_Config { if m == nil { return (*Update_Config)(nil) @@ -2831,6 +2850,32 @@ func (this *Update_MultiObjectRequest) EqualMessageVT(thatMsg proto.Message) boo return this.EqualVT(that) } +func (this *Update_TimestampRequest) EqualVT(that *Update_TimestampRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Id != that.Id { + return false + } + if this.Timestamp != that.Timestamp { + return false + } + if this.Force != that.Force { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Update_TimestampRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Update_TimestampRequest) + if !ok { + return false + } + return this.EqualVT(that) +} + func (this *Update_Config) EqualVT(that *Update_Config) bool { if this == that { return true @@ -6317,6 +6362,61 @@ func (m *Update_MultiObjectRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *Update_TimestampRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update_TimestampRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Update_TimestampRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Update_Config) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -10644,6 +10744,26 @@ func (m *Update_MultiObjectRequest) SizeVT() (n int) { return n } 
+func (m *Update_TimestampRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + func (m *Update_Config) SizeVT() (n int) { if m == nil { return 0 @@ -14837,6 +14957,129 @@ func (m *Update_MultiObjectRequest) UnmarshalVT(dAtA []byte) error { return nil } +func (m *Update_TimestampRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Update_TimestampRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Update_TimestampRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + func (m *Update_Config) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/apis/grpc/v1/vald/update.pb.go b/apis/grpc/v1/vald/update.pb.go index ad05fbe50c..fc5db2cf5f 100644 --- a/apis/grpc/v1/vald/update.pb.go +++ b/apis/grpc/v1/vald/update.pb.go @@ -46,7 +46,7 @@ var file_v1_vald_update_proto_rawDesc = []byte{ 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x9f, 0x02, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, + 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x92, 0x03, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, @@ -64,32 +64,42 @@ var file_v1_vald_update_proto_rawDesc = []byte{ 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x3a, 0x01, 0x2a, 0x22, 0x10, 0x2f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x42, 0x53, 0x0a, 0x1a, 0x6f, 0x72, 0x67, - 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x31, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x64, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x12, 0x71, 0x0a, 0x0f, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x3a, 0x01, 0x2a, 0x22, 0x11, 0x2f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x53, 0x0a, 0x1a, + 0x6f, 0x72, 0x67, 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x64, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, + 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var 
file_v1_vald_update_proto_goTypes = []any{ - (*payload.Update_Request)(nil), // 0: payload.v1.Update.Request - (*payload.Update_MultiRequest)(nil), // 1: payload.v1.Update.MultiRequest - (*payload.Object_Location)(nil), // 2: payload.v1.Object.Location - (*payload.Object_StreamLocation)(nil), // 3: payload.v1.Object.StreamLocation - (*payload.Object_Locations)(nil), // 4: payload.v1.Object.Locations + (*payload.Update_Request)(nil), // 0: payload.v1.Update.Request + (*payload.Update_MultiRequest)(nil), // 1: payload.v1.Update.MultiRequest + (*payload.Update_TimestampRequest)(nil), // 2: payload.v1.Update.TimestampRequest + (*payload.Object_Location)(nil), // 3: payload.v1.Object.Location + (*payload.Object_StreamLocation)(nil), // 4: payload.v1.Object.StreamLocation + (*payload.Object_Locations)(nil), // 5: payload.v1.Object.Locations } var file_v1_vald_update_proto_depIdxs = []int32{ 0, // 0: vald.v1.Update.Update:input_type -> payload.v1.Update.Request 0, // 1: vald.v1.Update.StreamUpdate:input_type -> payload.v1.Update.Request 1, // 2: vald.v1.Update.MultiUpdate:input_type -> payload.v1.Update.MultiRequest - 2, // 3: vald.v1.Update.Update:output_type -> payload.v1.Object.Location - 3, // 4: vald.v1.Update.StreamUpdate:output_type -> payload.v1.Object.StreamLocation - 4, // 5: vald.v1.Update.MultiUpdate:output_type -> payload.v1.Object.Locations - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type + 2, // 3: vald.v1.Update.UpdateTimestamp:input_type -> payload.v1.Update.TimestampRequest + 3, // 4: vald.v1.Update.Update:output_type -> payload.v1.Object.Location + 4, // 5: vald.v1.Update.StreamUpdate:output_type -> payload.v1.Object.StreamLocation + 5, // 6: vald.v1.Update.MultiUpdate:output_type -> payload.v1.Object.Locations + 3, // 7: vald.v1.Update.UpdateTimestamp:output_type -> payload.v1.Object.Location + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/apis/grpc/v1/vald/update_vtproto.pb.go b/apis/grpc/v1/vald/update_vtproto.pb.go index 60f92fd0a8..5c03218c4f 100644 --- a/apis/grpc/v1/vald/update_vtproto.pb.go +++ b/apis/grpc/v1/vald/update_vtproto.pb.go @@ -48,6 +48,8 @@ type UpdateClient interface { StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Update_StreamUpdateClient, error) // A method to update multiple indexed vectors in a single request. MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) + // A method to update timestamp an indexed vector. + UpdateTimestamp(ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption) (*payload.Object_Location, error) } type updateClient struct { @@ -113,6 +115,17 @@ func (c *updateClient) MultiUpdate( return out, nil } +func (c *updateClient) UpdateTimestamp( + ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption, +) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Update/UpdateTimestamp", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // UpdateServer is the server API for Update service. 
// All implementations must embed UnimplementedUpdateServer // for forward compatibility @@ -123,6 +136,8 @@ type UpdateServer interface { StreamUpdate(Update_StreamUpdateServer) error // A method to update multiple indexed vectors in a single request. MultiUpdate(context.Context, *payload.Update_MultiRequest) (*payload.Object_Locations, error) + // A method to update timestamp an indexed vector. + UpdateTimestamp(context.Context, *payload.Update_TimestampRequest) (*payload.Object_Location, error) mustEmbedUnimplementedUpdateServer() } @@ -144,6 +159,12 @@ func (UnimplementedUpdateServer) MultiUpdate( ) (*payload.Object_Locations, error) { return nil, status.Errorf(codes.Unimplemented, "method MultiUpdate not implemented") } + +func (UnimplementedUpdateServer) UpdateTimestamp( + context.Context, *payload.Update_TimestampRequest, +) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTimestamp not implemented") +} func (UnimplementedUpdateServer) mustEmbedUnimplementedUpdateServer() {} // UnsafeUpdateServer may be embedded to opt out of forward compatibility for this service. @@ -223,6 +244,26 @@ func _Update_MultiUpdate_Handler( return interceptor(ctx, in, info, handler) } +func _Update_UpdateTimestamp_Handler( + srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor, +) (any, error) { + in := new(payload.Update_TimestampRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpdateServer).UpdateTimestamp(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Update/UpdateTimestamp", + } + handler := func(ctx context.Context, req any) (any, error) { + return srv.(UpdateServer).UpdateTimestamp(ctx, req.(*payload.Update_TimestampRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Update_ServiceDesc is the grpc.ServiceDesc for Update service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -238,6 +279,10 @@ var Update_ServiceDesc = grpc.ServiceDesc{ MethodName: "MultiUpdate", Handler: _Update_MultiUpdate_Handler, }, + { + MethodName: "UpdateTimestamp", + Handler: _Update_UpdateTimestamp_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/apis/grpc/v1/vald/vald.go b/apis/grpc/v1/vald/vald.go index 3ff180c0e8..f65f2a3e40 100644 --- a/apis/grpc/v1/vald/vald.go +++ b/apis/grpc/v1/vald/vald.go @@ -95,6 +95,7 @@ const ( UpdateObjectRPCName = "UpdateObject" StreamUpdateObjectRPCName = "StreamUpdateObject" MultiUpdateObjectRPCName = "MultiUpdateObject" + UpdateTimestampRPCName = "UpdateTimestamp" UpsertRPCName = "Upsert" StreamUpsertRPCName = "StreamUpsert" diff --git a/apis/proto/v1/payload/payload.proto b/apis/proto/v1/payload/payload.proto index 9e2d7f3371..7eb58e9c5e 100644 --- a/apis/proto/v1/payload/payload.proto +++ b/apis/proto/v1/payload/payload.proto @@ -224,6 +224,16 @@ message Update { repeated ObjectRequest requests = 1; } + // Represent a vector meta data. + message TimestampRequest { + // The vector ID. + string id = 1 [(buf.validate.field).string.min_len = 1]; + // timestamp represents when this vector inserted. + int64 timestamp = 2; + // force represents forcefully update the timestamp. + bool force = 3; + } + // Represent the update configuration. message Config { // A flag to skip exist check during update operation. 
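For reference, a minimal Go sketch of exercising the new UpdateTimestamp RPC through the generated client; the gateway address, dial options, the NewUpdateClient constructor, and the use of Unix nanoseconds for the timestamp value are illustrative assumptions rather than anything fixed by this change:

package main

import (
	"context"
	"log"
	"time"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial a Vald gateway; the address is assumed and insecure credentials are for local testing only.
	conn, err := grpc.Dial("localhost:8081", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Update only the timestamp of an already-indexed vector.
	// Force requests the update even when the stored timestamp would otherwise be kept.
	loc, err := vald.NewUpdateClient(conn).UpdateTimestamp(context.Background(), &payload.Update_TimestampRequest{
		Id:        "sample-vector-id",    // existing vector ID; must be non-empty per the validation rule
		Timestamp: time.Now().UnixNano(), // new timestamp value (unit is an assumption here)
		Force:     false,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updated location: %v", loc)
}

The same call is also exposed over the REST gateway as POST /update/timestamp with an equivalent JSON body, per the google.api.http annotation and swagger definition in the following hunks.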
diff --git a/apis/proto/v1/vald/update.proto b/apis/proto/v1/vald/update.proto index 4cab1cabf0..f2e9598a9d 100644 --- a/apis/proto/v1/vald/update.proto +++ b/apis/proto/v1/vald/update.proto @@ -46,4 +46,12 @@ service Update { body: "*" }; } + + // A method to update timestamp an indexed vector. + rpc UpdateTimestamp(payload.v1.Update.TimestampRequest) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post: "/update/timestamp" + body: "*" + }; + } } diff --git a/apis/swagger/v1/vald/update.swagger.json b/apis/swagger/v1/vald/update.swagger.json index d295febc19..6e6c883681 100644 --- a/apis/swagger/v1/vald/update.swagger.json +++ b/apis/swagger/v1/vald/update.swagger.json @@ -75,6 +75,38 @@ ], "tags": ["Update"] } + }, + "/update/timestamp": { + "post": { + "summary": "A method to update timestamp an indexed vector.", + "operationId": "Update_UpdateTimestamp", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "body", + "description": "Represent a vector meta data.", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1UpdateTimestampRequest" + } + } + ], + "tags": ["Update"] + } } }, "definitions": { @@ -262,6 +294,25 @@ } }, "description": "Represent the update request." + }, + "v1UpdateTimestampRequest": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The vector ID." + }, + "timestamp": { + "type": "string", + "format": "int64", + "description": "timestamp represents when this vector inserted." + }, + "force": { + "type": "boolean", + "description": "force represents forcefully update the timestamp." + } + }, + "description": "Represent a vector meta data." 
} } } diff --git a/charts/vald-benchmark-operator/README.md b/charts/vald-benchmark-operator/README.md index a5117ebca4..bc3dda2a40 100644 --- a/charts/vald-benchmark-operator/README.md +++ b/charts/vald-benchmark-operator/README.md @@ -230,7 +230,7 @@ Run the following command to install the chart, | server_config.servers.grpc.server.probe_wait_time | string | `"3s"` | | | server_config.servers.grpc.server.restart | bool | `true` | | | server_config.servers.grpc.server.socket_path | string | `""` | | -| server_config.servers.grpc.serviecPort | int | `8081` | | +| server_config.servers.grpc.servicePort | int | `8081` | | | server_config.servers.rest.enabled | bool | `false` | | | server_config.tls.ca | string | `"/path/to/ca"` | | | server_config.tls.cert | string | `"/path/to/cert"` | | diff --git a/charts/vald-benchmark-operator/schemas/job-values.yaml b/charts/vald-benchmark-operator/schemas/job-values.yaml index a619e55d4d..1e166a1db0 100644 --- a/charts/vald-benchmark-operator/schemas/job-values.yaml +++ b/charts/vald-benchmark-operator/schemas/job-values.yaml @@ -800,7 +800,7 @@ server_config: reuse_port: true # server_config.healths.readiness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality reuse_addr: true - # server_config.healths.readiness.server.socket_option.tcp_fast_oepn -- server listen socket option for tcp_fast_open functionality + # server_config.healths.readiness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality tcp_fast_open: true # server_config.healths.readiness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality tcp_no_delay: true diff --git a/charts/vald-benchmark-operator/templates/deployment.yaml b/charts/vald-benchmark-operator/templates/deployment.yaml index 14df9b4c2c..e8d36b3a41 100644 --- a/charts/vald-benchmark-operator/templates/deployment.yaml +++ b/charts/vald-benchmark-operator/templates/deployment.yaml @@ -47,7 +47,7 @@ spec: {{- if .Values.podAnnotations }} {{- toYaml . | nindent 8 }} {{- end }} - {{- if .Values.server_config.metrics.pprof.enabeld }} + {{- if .Values.server_config.metrics.pprof.enabled }} pyroscope.io/scrape: "true" pyroscope.io/application-name: {{ .Values.name }} pyroscope.io/profile-cpu-enabled: "true" diff --git a/charts/vald-benchmark-operator/values.yaml b/charts/vald-benchmark-operator/values.yaml index 0712043902..c044d2656d 100644 --- a/charts/vald-benchmark-operator/values.yaml +++ b/charts/vald-benchmark-operator/values.yaml @@ -384,7 +384,7 @@ server_config: # @schema {"name": "server_config.servers.grpc.port", "type": "integer"} port: 8081 # @schema {"name": "server_config.servers.grpc.servicePort", "type": "integer"} - serviecPort: 8081 + servicePort: 8081 # @schema {"name": "server_config.servers.grpc.server", "type": "object"} server: # @schema {"name": "server_config.servers.grpc.server.mode", "type": "string"} diff --git a/charts/vald/README.md b/charts/vald/README.md index c2c9e211d4..3f574e605a 100644 --- a/charts/vald/README.md +++ b/charts/vald/README.md @@ -73,13 +73,14 @@ Run the following command to install the chart, | agent.faiss.initial_delay_max_duration | string | `"3m"` | maximum duration for initial delay | | agent.faiss.kvsdb.concurrency | int | `6` | kvsdb processing concurrency | | agent.faiss.load_index_timeout_factor | string | `"1ms"` | a factor of load index timeout. timeout duration will be calculated by (index count to be loaded) \* (factor). 
| -| agent.faiss.m | int | `8` | | +| agent.faiss.m | int | `8` | m | | agent.faiss.max_load_index_timeout | string | `"10m"` | maximum duration of load index timeout | -| agent.faiss.metric_type | string | `"l2"` | | +| agent.faiss.method_type | string | `"ivfpq"` | method type it should be `ivfpq` or `binaryindex` | +| agent.faiss.metric_type | string | `"l2"` | metric type it should be `innerproduct` or `l2` | | agent.faiss.min_load_index_timeout | string | `"3m"` | minimum duration of load index timeout | | agent.faiss.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of myself | -| agent.faiss.nbits_per_idx | int | `8` | | -| agent.faiss.nlist | int | `100` | | +| agent.faiss.nbits_per_idx | int | `8` | nbits_per_idx | +| agent.faiss.nlist | int | `100` | nlist | | agent.faiss.pod_name | string | `"_MY_POD_NAME_"` | pod name of myself | | agent.faiss.vqueue.delete_buffer_pool_size | int | `5000` | delete slice pool buffer size | | agent.faiss.vqueue.insert_buffer_pool_size | int | `10000` | insert slice pool buffer size | @@ -444,7 +445,7 @@ Run the following command to install the chart, | defaults.server_config.healths.readiness.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | | defaults.server_config.healths.readiness.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | | defaults.server_config.healths.readiness.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| defaults.server_config.healths.readiness.server.socket_option.tcp_fast_open | bool | `true` | | +| defaults.server_config.healths.readiness.server.socket_option.tcp_fast_open | bool | `true` | server listen socket option for tcp_fast_open functionality | | defaults.server_config.healths.readiness.server.socket_option.tcp_no_delay | bool | `true` | server listen socket option for tcp_no_delay functionality | | defaults.server_config.healths.readiness.server.socket_option.tcp_quick_ack | bool | `true` | server listen socket option for tcp_quick_ack functionality | | defaults.server_config.healths.readiness.server.socket_path | string | `""` | server socket_path | diff --git a/charts/vald/values.schema.json b/charts/vald/values.schema.json index d67182b679..f364e2359b 100644 --- a/charts/vald/values.schema.json +++ b/charts/vald/values.schema.json @@ -160,12 +160,21 @@ "type": "string", "description": "a factor of load index timeout. timeout duration will be calculated by (index count to be loaded) * (factor)." 
}, - "m": { "type": "integer" }, + "m": { "type": "integer", "description": "m" }, "max_load_index_timeout": { "type": "string", "description": "maximum duration of load index timeout" }, - "metric_type": { "type": "string", "enum": ["innerproduct", "l2"] }, + "method_type": { + "type": "string", + "description": "method type it should be `ivfpq` or `binaryindex`", + "enum": ["ivfpq", "binaryindex"] + }, + "metric_type": { + "type": "string", + "description": "metric type it should be `innerproduct` or `l2`", + "enum": ["innerproduct", "l2"] + }, "min_load_index_timeout": { "type": "string", "description": "minimum duration of load index timeout" @@ -174,8 +183,11 @@ "type": "string", "description": "namespace of myself" }, - "nbits_per_idx": { "type": "integer" }, - "nlist": { "type": "integer" }, + "nbits_per_idx": { + "type": "integer", + "description": "nbits_per_idx" + }, + "nlist": { "type": "integer", "description": "nlist" }, "pod_name": { "type": "string", "description": "pod name of myself" diff --git a/charts/vald/values.yaml b/charts/vald/values.yaml index acbc75da52..27960e5ba7 100644 --- a/charts/vald/values.yaml +++ b/charts/vald/values.yaml @@ -530,7 +530,7 @@ defaults: reuse_port: true # defaults.server_config.healths.readiness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality reuse_addr: true - # defaults.server_config.healths.readiness.server.socket_option.tcp_fast_oepn -- server listen socket option for tcp_fast_open functionality + # defaults.server_config.healths.readiness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality tcp_fast_open: true # defaults.server_config.healths.readiness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality tcp_no_delay: true diff --git a/codecov.yml b/codecov.yaml similarity index 100% rename from codecov.yml rename to codecov.yaml diff --git a/dockers/agent/core/agent/Dockerfile b/dockers/agent/core/agent/Dockerfile index f3195dde42..41d415e47e 100644 --- a/dockers/agent/core/agent/Dockerfile +++ b/dockers/agent/core/agent/Dockerfile @@ -36,7 +36,7 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=agent/core/agent ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root ENV CARGO_HOME=${RUST_HOME}/cargo @@ -47,9 +47,10 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -91,4 +92,4 @@ LABEL maintainer="vdaas.org vald team " COPY --from=builder /usr/bin/agent /usr/bin/agent # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/agent"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/agent"] diff --git a/dockers/agent/core/faiss/Dockerfile b/dockers/agent/core/faiss/Dockerfile index 463e13494b..7566609ed7 100644 --- 
a/dockers/agent/core/faiss/Dockerfile +++ b/dockers/agent/core/faiss/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -93,4 +95,4 @@ COPY --from=builder /usr/bin/faiss /usr/bin/faiss COPY cmd/agent/core/faiss/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/faiss"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/faiss"] diff --git a/dockers/agent/core/ngt/Dockerfile b/dockers/agent/core/ngt/Dockerfile index cf0bbcdce2..d764a800d1 100644 --- a/dockers/agent/core/ngt/Dockerfile +++ b/dockers/agent/core/ngt/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -92,4 +94,4 @@ COPY --from=builder /usr/bin/ngt /usr/bin/ngt COPY cmd/agent/core/ngt/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/ngt"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/ngt"] diff --git a/dockers/agent/sidecar/Dockerfile b/dockers/agent/sidecar/Dockerfile index 2b06e565fd..b4a1d5ed90 100644 --- a/dockers/agent/sidecar/Dockerfile +++ b/dockers/agent/sidecar/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + 
--mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -82,4 +84,4 @@ LABEL maintainer="vdaas.org vald team " COPY --from=builder /usr/bin/sidecar /usr/bin/sidecar # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/sidecar"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/sidecar"] diff --git a/dockers/binfmt/Dockerfile b/dockers/binfmt/Dockerfile index 47284d17e2..f2d73909bd 100644 --- a/dockers/binfmt/Dockerfile +++ b/dockers/binfmt/Dockerfile @@ -16,4 +16,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM tonistiigi/binfmt:master AS builder \ No newline at end of file +FROM tonistiigi/binfmt:master AS builder diff --git a/dockers/buildbase/Dockerfile b/dockers/buildbase/Dockerfile index 6457b01457..301a31e010 100644 --- a/dockers/buildbase/Dockerfile +++ b/dockers/buildbase/Dockerfile @@ -16,4 +16,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM ubuntu:devel AS builder \ No newline at end of file +FROM ubuntu:devel AS builder diff --git a/dockers/buildkit/Dockerfile b/dockers/buildkit/Dockerfile index 9dd722ea25..99c71c61e2 100644 --- a/dockers/buildkit/Dockerfile +++ b/dockers/buildkit/Dockerfile @@ -16,4 +16,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM moby/buildkit:master AS builder \ No newline at end of file +FROM moby/buildkit:master AS builder diff --git a/dockers/ci/base/Dockerfile b/dockers/ci/base/Dockerfile index 09fccb1e35..c4f278942f 100644 --- a/dockers/ci/base/Dockerfile +++ b/dockers/ci/base/Dockerfile @@ -41,7 +41,7 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=ci/base ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root ENV RUSTUP_HOME=${RUST_HOME}/rustup @@ -52,11 +52,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > 
/etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -113,7 +115,6 @@ RUN --mount=type=bind,target=.,rw \ && make kubelinter/install \ && make reviewdog/install \ && make tparse/install \ - && make valdcli/install \ && make yq/install \ && make minikube/install \ && make stern/install \ @@ -123,4 +124,4 @@ RUN --mount=type=bind,target=.,rw \ && rm -rf ${GOPATH}/src/github.com/${ORG}/${REPO}/* # skipcq: DOK-DL3002 USER root:root -ENTRYPOINT ["/bin/bash"] \ No newline at end of file +ENTRYPOINT ["/bin/bash"] diff --git a/dockers/dev/Dockerfile b/dockers/dev/Dockerfile index 765421db67..ffcdc07312 100644 --- a/dockers/dev/Dockerfile +++ b/dockers/dev/Dockerfile @@ -41,7 +41,7 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=dev ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root ENV RUSTUP_HOME=${RUST_HOME}/rustup @@ -52,11 +52,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -133,7 +135,6 @@ RUN --mount=type=bind,target=.,rw \ && make kubelinter/install \ && make reviewdog/install \ && make tparse/install \ - && make valdcli/install \ && make yq/install \ && make minikube/install \ && make stern/install \ @@ -142,4 +143,4 @@ RUN --mount=type=bind,target=.,rw \ && make faiss/install \ && rm -rf ${GOPATH}/src/github.com/${ORG}/${REPO}/* # skipcq: DOK-DL3002 -USER root:root \ No newline at end of file +USER root:root diff --git a/dockers/discoverer/k8s/Dockerfile b/dockers/discoverer/k8s/Dockerfile index 19e5953ee2..a9bf1e5781 100644 --- a/dockers/discoverer/k8s/Dockerfile +++ b/dockers/discoverer/k8s/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > 
/etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/discoverer /usr/bin/discoverer COPY cmd/discoverer/k8s/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/discoverer"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/discoverer"] diff --git a/dockers/gateway/filter/Dockerfile b/dockers/gateway/filter/Dockerfile index b523b5e5b8..4ddc9858e9 100644 --- a/dockers/gateway/filter/Dockerfile +++ b/dockers/gateway/filter/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/filter /usr/bin/filter COPY cmd/gateway/filter/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/filter"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/filter"] diff --git a/dockers/gateway/lb/Dockerfile b/dockers/gateway/lb/Dockerfile index 2f8c91768b..c3bd773deb 100644 --- a/dockers/gateway/lb/Dockerfile +++ b/dockers/gateway/lb/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/lb /usr/bin/lb COPY cmd/gateway/lb/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/lb"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/lb"] diff --git a/dockers/gateway/mirror/Dockerfile 
b/dockers/gateway/mirror/Dockerfile index 9b97231c74..693d891601 100644 --- a/dockers/gateway/mirror/Dockerfile +++ b/dockers/gateway/mirror/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/mirror /usr/bin/mirror COPY cmd/gateway/mirror/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/mirror"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/mirror"] diff --git a/dockers/index/job/correction/Dockerfile b/dockers/index/job/correction/Dockerfile index 01e3818c56..1dbefb3c48 100644 --- a/dockers/index/job/correction/Dockerfile +++ b/dockers/index/job/correction/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/index-correction /usr/bin/index-correction COPY cmd/index/job/correction/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-correction"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-correction"] diff --git a/dockers/index/job/creation/Dockerfile b/dockers/index/job/creation/Dockerfile index d656b3ad22..13576d6dc6 100644 --- a/dockers/index/job/creation/Dockerfile +++ b/dockers/index/job/creation/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - 
--mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/index-creation /usr/bin/index-creation COPY cmd/index/job/creation/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-creation"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-creation"] diff --git a/dockers/index/job/readreplica/rotate/Dockerfile b/dockers/index/job/readreplica/rotate/Dockerfile index bdb0ec7664..d443e0fcc0 100644 --- a/dockers/index/job/readreplica/rotate/Dockerfile +++ b/dockers/index/job/readreplica/rotate/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/readreplica-rotate /usr/bin/readreplica-rotate COPY cmd/index/job/readreplica/rotate/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/readreplica-rotate"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/readreplica-rotate"] diff --git a/dockers/index/job/save/Dockerfile b/dockers/index/job/save/Dockerfile index fdd674abd4..9ef2ef629c 100644 --- a/dockers/index/job/save/Dockerfile +++ b/dockers/index/job/save/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ 
--mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/index-save /usr/bin/index-save COPY cmd/index/job/save/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-save"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-save"] diff --git a/dockers/index/operator/Dockerfile b/dockers/index/operator/Dockerfile index 23e9aae514..d1ff09eec6 100644 --- a/dockers/index/operator/Dockerfile +++ b/dockers/index/operator/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/index-operator /usr/bin/index-operator COPY cmd/index/operator/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-operator"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-operator"] diff --git a/dockers/manager/index/Dockerfile b/dockers/manager/index/Dockerfile index edecb98a6c..2fadc08ebb 100644 --- a/dockers/manager/index/Dockerfile +++ b/dockers/manager/index/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 
'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/index /usr/bin/index COPY cmd/manager/index/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index"] diff --git a/dockers/operator/helm/Dockerfile b/dockers/operator/helm/Dockerfile index db24094790..049914cba3 100644 --- a/dockers/operator/helm/Dockerfile +++ b/dockers/operator/helm/Dockerfile @@ -51,11 +51,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -104,4 +106,4 @@ COPY --from=builder /opt/helm/charts/vald /opt/helm/charts/vald COPY --from=builder /opt/helm/charts/vald-helm-operator /opt/helm/charts/vald-helm-operator # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/helm-operator", "run", "--watches-file=/opt/helm/watches.yaml"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/helm-operator", "run", "--watches-file=/opt/helm/watches.yaml"] diff --git a/dockers/tools/benchmark/job/Dockerfile b/dockers/tools/benchmark/job/Dockerfile index 65da31d468..cb2669cb87 100644 --- a/dockers/tools/benchmark/job/Dockerfile +++ b/dockers/tools/benchmark/job/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -91,4 +93,4 @@ COPY --from=builder /usr/bin/job /usr/bin/job COPY 
cmd/tools/benchmark/job/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/job"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/job"] diff --git a/dockers/tools/benchmark/operator/Dockerfile b/dockers/tools/benchmark/operator/Dockerfile index 283773b11e..f99fa41491 100644 --- a/dockers/tools/benchmark/operator/Dockerfile +++ b/dockers/tools/benchmark/operator/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -83,4 +85,4 @@ COPY --from=builder /usr/bin/operator /usr/bin/operator COPY cmd/tools/benchmark/operator/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/operator"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/operator"] diff --git a/dockers/tools/cli/loadtest/Dockerfile b/dockers/tools/cli/loadtest/Dockerfile index ffbea81b05..594c94eb99 100644 --- a/dockers/tools/cli/loadtest/Dockerfile +++ b/dockers/tools/cli/loadtest/Dockerfile @@ -47,11 +47,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -91,4 +93,4 @@ COPY --from=builder /usr/bin/loadtest /usr/bin/loadtest COPY cmd/tools/cli/loadtest/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/loadtest"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/loadtest"] diff --git a/docs/contributing/unit-test-guideline.md b/docs/contributing/unit-test-guideline.md index 833e65addf..41341bd704 100644 --- a/docs/contributing/unit-test-guideline.md +++ 
b/docs/contributing/unit-test-guideline.md @@ -128,7 +128,7 @@ You have to create unit tests for error patterns as the same as success patterns #### Advanced -##### Robust boudary test +##### Robust boundary test The previous section is about the basic test cases. The (robust) boundary test should be applied to cover more test coverage. diff --git a/docs/user-guides/observability-configuration.md b/docs/user-guides/observability-configuration.md index 677781f191..db965a9417 100644 --- a/docs/user-guides/observability-configuration.md +++ b/docs/user-guides/observability-configuration.md @@ -176,7 +176,7 @@ defaults: #### Specify the Telemetry attribute -You can add the component information to the attribute of telemetry data by editing `defaults.observability.otlp.attirbute`. +You can add the component information to the attribute of telemetry data by editing `defaults.observability.otlp.attribute`. E.g., when setting `vald-agent-ngt-0` as `agent.observability.otlp.attribute.pod_name`, `target_pod: vald-agent-ngt-0` will be added to the attribute. These attributes are set auto by the environment values, so Vald recommends using default values unless there is a specific reason. diff --git a/example/client/go.mod b/example/client/go.mod index 977d0df4ce..91b9c6fe62 100644 --- a/example/client/go.mod +++ b/example/client/go.mod @@ -1,6 +1,6 @@ module github.com/vdaas/vald/example/client -go 1.23.0 +go 1.23.1 replace ( github.com/envoyproxy/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v1.1.0 @@ -8,13 +8,13 @@ replace ( github.com/golang/protobuf => github.com/golang/protobuf v1.5.4 github.com/kpango/glg => github.com/kpango/glg v1.6.15 github.com/pkg/sftp => github.com/pkg/sftp v1.13.6 - golang.org/x/crypto => golang.org/x/crypto v0.26.0 - golang.org/x/net => golang.org/x/net v0.28.0 - golang.org/x/text => golang.org/x/text v0.17.0 - google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/grpc => google.golang.org/grpc v1.65.0 + golang.org/x/crypto => golang.org/x/crypto v0.27.0 + golang.org/x/net => golang.org/x/net v0.29.0 + golang.org/x/text => golang.org/x/text v0.18.0 + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc => google.golang.org/grpc v1.66.1 google.golang.org/protobuf => google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 @@ -25,18 +25,18 @@ require ( github.com/kpango/glg v1.6.14 github.com/vdaas/vald-client-go v1.7.13 gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 - google.golang.org/grpc v1.64.1 + google.golang.org/grpc v1.66.0 ) require ( buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/kpango/fastime v1.1.9 // indirect - github.com/planetscale/vtprotobuf v0.6.0 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect 
golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.18.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect google.golang.org/protobuf v1.34.2 // indirect ) diff --git a/example/client/go.mod.default b/example/client/go.mod.default index 127dcf1c62..a1ab487b5a 100644 --- a/example/client/go.mod.default +++ b/example/client/go.mod.default @@ -1,6 +1,6 @@ module github.com/vdaas/vald/example/client -go 1.23.0 +go 1.23.1 replace ( github.com/envoyproxy/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate latest diff --git a/example/client/go.sum b/example/client/go.sum index ddee44829b..90e30cb0d1 100644 --- a/example/client/go.sum +++ b/example/client/go.sum @@ -10,8 +10,8 @@ github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1 h1:rxyM+7uaZQ35P9fbixd github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1/go.mod h1:CAYeq6us9NfnRkSz67/xKVIR6/vaY5ZQZRe6IVcaIKg= github.com/kpango/glg v1.6.15 h1:nw0xSxpSyrDIWHeb3dvnE08PW+SCbK+aYFETT75IeLA= github.com/kpango/glg v1.6.15/go.mod h1:cmsc7Yeu8AS3wHLmN7bhwENXOpxfq+QoqxCIk2FneRk= -github.com/planetscale/vtprotobuf v0.6.0 h1:nBeETjudeJ5ZgBHUz1fVHvbqUKnYOXNhsIEabROxmNA= -github.com/planetscale/vtprotobuf v0.6.0/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/vdaas/vald-client-go v1.7.13 h1:WGhy3buxn5ECgySfxd/t8ZCooF6UfZuwy6kqfKoFP7c= @@ -22,19 +22,19 @@ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 
h1:vJpL69PeUullhJyKtTjHjENEmZU3BkO4e+fod7nKzgM= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946/go.mod h1:BQUWDHIAygjdt1HnUPQ0eWqLN2n5FwJycrpYUVUOx2I= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM= +google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= diff --git a/go.mod b/go.mod index 3e63824046..de77307da0 100644 --- a/go.mod +++ b/go.mod @@ -1,21 +1,21 @@ module github.com/vdaas/vald -go 1.23.0 +go 1.23.1 replace ( cloud.google.com/go => cloud.google.com/go v0.115.1 cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.62.0 - cloud.google.com/go/compute => cloud.google.com/go/compute v1.27.5 - cloud.google.com/go/datastore => cloud.google.com/go/datastore v1.17.1 + cloud.google.com/go/compute => cloud.google.com/go/compute v1.28.0 + cloud.google.com/go/datastore => cloud.google.com/go/datastore v1.19.0 cloud.google.com/go/firestore => cloud.google.com/go/firestore v1.16.0 - cloud.google.com/go/iam => cloud.google.com/go/iam v1.1.13 - cloud.google.com/go/kms => cloud.google.com/go/kms v1.18.5 - cloud.google.com/go/monitoring => cloud.google.com/go/monitoring v1.20.4 - cloud.google.com/go/pubsub => cloud.google.com/go/pubsub v1.41.0 - cloud.google.com/go/secretmanager => cloud.google.com/go/secretmanager v1.13.6 + cloud.google.com/go/iam => cloud.google.com/go/iam v1.2.0 + cloud.google.com/go/kms => cloud.google.com/go/kms v1.19.0 + cloud.google.com/go/monitoring => cloud.google.com/go/monitoring v1.21.0 + cloud.google.com/go/pubsub => cloud.google.com/go/pubsub v1.43.0 + cloud.google.com/go/secretmanager => cloud.google.com/go/secretmanager v1.14.0 cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0 - cloud.google.com/go/trace => cloud.google.com/go/trace v1.10.12 - code.cloudfoundry.org/bytefmt => code.cloudfoundry.org/bytefmt v0.1.0 + cloud.google.com/go/trace => cloud.google.com/go/trace v1.11.0 + code.cloudfoundry.org/bytefmt => code.cloudfoundry.org/bytefmt v0.9.0 contrib.go.opencensus.io/exporter/aws => contrib.go.opencensus.io/exporter/aws v0.0.0-20230502192102-15967c811cec contrib.go.opencensus.io/exporter/prometheus => 
contrib.go.opencensus.io/exporter/prometheus v0.4.2 contrib.go.opencensus.io/integrations/ocsql => contrib.go.opencensus.io/integrations/ocsql v0.1.7 @@ -25,7 +25,7 @@ replace ( github.com/Azure/azure-sdk-for-go/sdk/azcore => github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity => github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/internal => github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 - github.com/Azure/go-amqp => github.com/Azure/go-amqp v1.0.5 + github.com/Azure/go-amqp => github.com/Azure/go-amqp v1.1.0 github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.1-0.20240530140449-f7ea664c9cff+incompatible github.com/Azure/go-autorest/autorest => github.com/Azure/go-autorest/autorest v0.11.30-0.20240530140449-f7ea664c9cff github.com/Azure/go-autorest/autorest/adal => github.com/Azure/go-autorest/autorest/adal v0.9.24 @@ -37,35 +37,35 @@ replace ( github.com/BurntSushi/toml => github.com/BurntSushi/toml v1.4.0 github.com/DATA-DOG/go-sqlmock => github.com/DATA-DOG/go-sqlmock v1.5.2 github.com/GoogleCloudPlatform/cloudsql-proxy => github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.0 - github.com/Masterminds/semver/v3 => github.com/Masterminds/semver/v3 v3.2.1 - github.com/ajstarks/deck => github.com/ajstarks/deck v0.0.0-20240814155529-0478e0c25be8 - github.com/ajstarks/deck/generate => github.com/ajstarks/deck/generate v0.0.0-20240814155529-0478e0c25be8 + github.com/Masterminds/semver/v3 => github.com/Masterminds/semver/v3 v3.3.0 + github.com/ajstarks/deck => github.com/ajstarks/deck v0.0.0-20240828115917-88fc45aa28b1 + github.com/ajstarks/deck/generate => github.com/ajstarks/deck/generate v0.0.0-20240828115917-88fc45aa28b1 github.com/ajstarks/svgo => github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b github.com/akrylysov/pogreb => github.com/akrylysov/pogreb v0.10.2 github.com/antihax/optional => github.com/antihax/optional v1.0.0 github.com/armon/go-socks5 => github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.55.5 - github.com/aws/aws-sdk-go-v2 => github.com/aws/aws-sdk-go-v2 v1.30.4 + github.com/aws/aws-sdk-go-v2 => github.com/aws/aws-sdk-go-v2 v1.30.5 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream => github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 - github.com/aws/aws-sdk-go-v2/config => github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/credentials => github.com/aws/aws-sdk-go-v2/credentials v1.17.28 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds => github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 - github.com/aws/aws-sdk-go-v2/feature/s3/manager => github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11 - github.com/aws/aws-sdk-go-v2/internal/configsources => github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 => github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 + github.com/aws/aws-sdk-go-v2/config => github.com/aws/aws-sdk-go-v2/config v1.27.33 + github.com/aws/aws-sdk-go-v2/credentials => github.com/aws/aws-sdk-go-v2/credentials v1.17.32 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds => github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 + github.com/aws/aws-sdk-go-v2/feature/s3/manager => github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18 + github.com/aws/aws-sdk-go-v2/internal/configsources => github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 + 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 => github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 github.com/aws/aws-sdk-go-v2/internal/ini => github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding => github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 - github.com/aws/aws-sdk-go-v2/service/internal/checksum => github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 - github.com/aws/aws-sdk-go-v2/service/internal/s3shared => github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 - github.com/aws/aws-sdk-go-v2/service/kms => github.com/aws/aws-sdk-go-v2/service/kms v1.35.4 - github.com/aws/aws-sdk-go-v2/service/s3 => github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 - github.com/aws/aws-sdk-go-v2/service/secretsmanager => github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5 - github.com/aws/aws-sdk-go-v2/service/sns => github.com/aws/aws-sdk-go-v2/service/sns v1.31.4 - github.com/aws/aws-sdk-go-v2/service/sqs => github.com/aws/aws-sdk-go-v2/service/sqs v1.34.4 - github.com/aws/aws-sdk-go-v2/service/ssm => github.com/aws/aws-sdk-go-v2/service/ssm v1.52.5 - github.com/aws/aws-sdk-go-v2/service/sso => github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 - github.com/aws/aws-sdk-go-v2/service/sts => github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 + github.com/aws/aws-sdk-go-v2/service/internal/checksum => github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 + github.com/aws/aws-sdk-go-v2/service/internal/s3shared => github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 + github.com/aws/aws-sdk-go-v2/service/kms => github.com/aws/aws-sdk-go-v2/service/kms v1.35.7 + github.com/aws/aws-sdk-go-v2/service/s3 => github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 + github.com/aws/aws-sdk-go-v2/service/secretsmanager => github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.8 + github.com/aws/aws-sdk-go-v2/service/sns => github.com/aws/aws-sdk-go-v2/service/sns v1.31.7 + github.com/aws/aws-sdk-go-v2/service/sqs => github.com/aws/aws-sdk-go-v2/service/sqs v1.34.8 + github.com/aws/aws-sdk-go-v2/service/ssm => github.com/aws/aws-sdk-go-v2/service/ssm v1.52.8 + github.com/aws/aws-sdk-go-v2/service/sso => github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 + github.com/aws/aws-sdk-go-v2/service/sts => github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 github.com/aws/smithy-go => github.com/aws/smithy-go v1.20.4 github.com/benbjohnson/clock => github.com/benbjohnson/clock v1.3.5 github.com/beorn7/perks => github.com/beorn7/perks v1.0.1 @@ -79,7 +79,7 @@ replace ( github.com/chzyer/readline => github.com/chzyer/readline v1.5.1 github.com/chzyer/test => github.com/chzyer/test v1.0.0 github.com/cncf/udpa/go => github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe - github.com/cncf/xds/go => github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 + github.com/cncf/xds/go => github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 github.com/cockroachdb/apd => github.com/cockroachdb/apd v1.1.0 github.com/coreos/go-systemd/v22 => github.com/coreos/go-systemd/v22 v22.5.0 github.com/cpuguy83/go-md2man/v2 => github.com/cpuguy83/go-md2man/v2 v2.0.4 @@ -121,7 +121,7 @@ replace ( github.com/go-playground/assert/v2 => 
github.com/go-playground/assert/v2 v2.2.0 github.com/go-playground/locales => github.com/go-playground/locales v0.14.1 github.com/go-playground/universal-translator => github.com/go-playground/universal-translator v0.18.1 - github.com/go-playground/validator/v10 => github.com/go-playground/validator/v10 v10.22.0 + github.com/go-playground/validator/v10 => github.com/go-playground/validator/v10 v10.22.1 github.com/go-redis/redis/v8 => github.com/go-redis/redis/v8 v8.11.5 github.com/go-sql-driver/mysql => github.com/go-sql-driver/mysql v1.8.1 github.com/go-task/slim-sprig => github.com/go-task/slim-sprig v2.20.0+incompatible @@ -146,7 +146,7 @@ replace ( github.com/golang/mock => github.com/golang/mock v1.6.0 github.com/golang/protobuf => github.com/golang/protobuf v1.5.4 github.com/golang/snappy => github.com/golang/snappy v0.0.4 - github.com/google/btree => github.com/google/btree v1.1.2 + github.com/google/btree => github.com/google/btree v1.1.3 github.com/google/gnostic => github.com/google/gnostic v0.7.0 github.com/google/go-cmp => github.com/google/go-cmp v0.6.0 github.com/google/go-replayers/grpcreplay => github.com/google/go-replayers/grpcreplay v1.3.0 @@ -154,7 +154,7 @@ replace ( github.com/google/gofuzz => github.com/google/gofuzz v1.2.0 github.com/google/martian => github.com/google/martian v2.1.0+incompatible github.com/google/martian/v3 => github.com/google/martian/v3 v3.3.3 - github.com/google/pprof => github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 + github.com/google/pprof => github.com/google/pprof v0.0.0-20240903155634-a8630aee4ab9 github.com/google/shlex => github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/subcommands => github.com/google/subcommands v1.2.0 github.com/google/uuid => github.com/google/uuid v1.6.0 @@ -192,7 +192,7 @@ replace ( github.com/jstemmer/go-junit-report => github.com/jstemmer/go-junit-report v1.0.0 github.com/kisielk/errcheck => github.com/kisielk/errcheck v1.7.0 github.com/kisielk/gotool => github.com/kisielk/gotool v1.0.0 - github.com/klauspost/compress => github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b + github.com/klauspost/compress => github.com/klauspost/compress v1.17.10-0.20240903161129-13c124496702 github.com/klauspost/cpuid/v2 => github.com/klauspost/cpuid/v2 v2.2.8 github.com/kpango/fastime => github.com/kpango/fastime v1.1.9 github.com/kpango/fuid => github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1 @@ -211,7 +211,7 @@ replace ( github.com/mailru/easyjson => github.com/mailru/easyjson v0.7.7 github.com/mattn/go-colorable => github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty => github.com/mattn/go-isatty v0.0.20 - github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.14.22 + github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.14.23 github.com/matttproud/golang_protobuf_extensions => github.com/matttproud/golang_protobuf_extensions v1.0.4 github.com/mitchellh/colorstring => github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db github.com/moby/spdystream => github.com/moby/spdystream v0.5.0 @@ -225,8 +225,8 @@ replace ( github.com/niemeyer/pretty => github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e github.com/nxadm/tail => github.com/nxadm/tail v1.4.11 github.com/onsi/ginkgo => github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.20.0 - github.com/onsi/gomega => github.com/onsi/gomega v1.34.1 + github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.20.2 + 
github.com/onsi/gomega => github.com/onsi/gomega v1.34.2 github.com/peterbourgon/diskv => github.com/peterbourgon/diskv v2.0.1+incompatible github.com/phpdave11/gofpdf => github.com/phpdave11/gofpdf v1.4.2 github.com/phpdave11/gofpdi => github.com/phpdave11/gofpdi v1.0.13 @@ -237,9 +237,9 @@ replace ( github.com/pkg/sftp => github.com/pkg/sftp v1.13.6 github.com/pmezard/go-difflib => github.com/pmezard/go-difflib v1.0.0 github.com/prashantv/gostub => github.com/prashantv/gostub v1.1.0 - github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.20.0 + github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.20.3 github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common => github.com/prometheus/common v0.55.0 + github.com/prometheus/common => github.com/prometheus/common v0.59.1 github.com/prometheus/procfs => github.com/prometheus/procfs v0.15.1 github.com/prometheus/prometheus => github.com/prometheus/prometheus v1.99.0 github.com/quasilyte/go-ruleguard => github.com/quasilyte/go-ruleguard v0.4.2 @@ -248,7 +248,7 @@ replace ( github.com/quasilyte/stdinfo => github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 github.com/rogpeppe/fastuuid => github.com/rogpeppe/fastuuid v1.2.0 github.com/rogpeppe/go-internal => github.com/rogpeppe/go-internal v1.12.0 - github.com/rs/xid => github.com/rs/xid v1.5.0 + github.com/rs/xid => github.com/rs/xid v1.6.0 github.com/rs/zerolog => github.com/rs/zerolog v1.33.0 github.com/russross/blackfriday/v2 => github.com/russross/blackfriday/v2 v2.1.0 github.com/ruudk/golang-pdf417 => github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245 @@ -273,19 +273,19 @@ replace ( github.com/xlab/treeprint => github.com/xlab/treeprint v1.2.0 github.com/zeebo/assert => github.com/zeebo/assert v1.3.1 github.com/zeebo/xxh3 => github.com/zeebo/xxh3 v1.0.2 - go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.10 + go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.11 go.opencensus.io => go.opencensus.io v0.24.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 - go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 + go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/internal/retry => go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.17.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric => go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.43.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace => go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 - go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v1.28.0 - go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/sdk/metric => go.opentelemetry.io/otel/sdk/metric v1.28.0 - go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => 
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace => go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 + go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v1.29.0 + go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.29.0 + go.opentelemetry.io/otel/sdk/metric => go.opentelemetry.io/otel/sdk/metric v1.29.0 + go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.29.0 go.opentelemetry.io/proto/otlp => go.opentelemetry.io/proto/otlp v1.3.1 go.starlark.net => go.starlark.net v0.0.0-20240725214946-42030a7cedce go.uber.org/atomic => go.uber.org/atomic v1.11.0 @@ -294,32 +294,32 @@ replace ( go.uber.org/multierr => go.uber.org/multierr v1.11.0 go.uber.org/zap => go.uber.org/zap v1.27.0 gocloud.dev => gocloud.dev v0.39.0 - golang.org/x/crypto => golang.org/x/crypto v0.26.0 - golang.org/x/exp => golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa - golang.org/x/exp/typeparams => golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa - golang.org/x/image => golang.org/x/image v0.19.0 + golang.org/x/crypto => golang.org/x/crypto v0.27.0 + golang.org/x/exp => golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 + golang.org/x/exp/typeparams => golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 + golang.org/x/image => golang.org/x/image v0.20.0 golang.org/x/lint => golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 - golang.org/x/mobile => golang.org/x/mobile v0.0.0-20240806205939-81131f6468ab - golang.org/x/mod => golang.org/x/mod v0.20.0 - golang.org/x/net => golang.org/x/net v0.28.0 - golang.org/x/oauth2 => golang.org/x/oauth2 v0.22.0 + golang.org/x/mobile => golang.org/x/mobile v0.0.0-20240909163608-642950227fb3 + golang.org/x/mod => golang.org/x/mod v0.21.0 + golang.org/x/net => golang.org/x/net v0.29.0 + golang.org/x/oauth2 => golang.org/x/oauth2 v0.23.0 golang.org/x/sync => golang.org/x/sync v0.8.0 - golang.org/x/sys => golang.org/x/sys v0.24.0 - golang.org/x/term => golang.org/x/term v0.23.0 - golang.org/x/text => golang.org/x/text v0.17.0 + golang.org/x/sys => golang.org/x/sys v0.25.0 + golang.org/x/term => golang.org/x/term v0.24.0 + golang.org/x/text => golang.org/x/text v0.18.0 golang.org/x/time => golang.org/x/time v0.6.0 - golang.org/x/tools => golang.org/x/tools v0.24.0 - golang.org/x/xerrors => golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 + golang.org/x/tools => golang.org/x/tools v0.25.0 + golang.org/x/xerrors => golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da gomodules.xyz/jsonpatch/v2 => gomodules.xyz/jsonpatch/v2 v2.4.0 gonum.org/v1/gonum => gonum.org/v1/gonum v0.15.1 gonum.org/v1/hdf5 => gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 gonum.org/v1/plot => gonum.org/v1/plot v0.14.0 - google.golang.org/api => google.golang.org/api v0.192.0 + google.golang.org/api => google.golang.org/api v0.196.0 google.golang.org/appengine => google.golang.org/appengine v1.6.8 - google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/grpc => google.golang.org/grpc v1.65.0 + 
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc => google.golang.org/grpc v1.66.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc => google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/protobuf => google.golang.org/protobuf v1.34.2 gopkg.in/check.v1 => gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c @@ -336,7 +336,7 @@ replace ( k8s.io/client-go => k8s.io/client-go v0.30.3 k8s.io/component-base => k8s.io/component-base v0.30.3 k8s.io/klog/v2 => k8s.io/klog/v2 v2.130.1 - k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 k8s.io/kubernetes => k8s.io/kubernetes v0.30.3 k8s.io/metrics => k8s.io/metrics v0.30.3 nhooyr.io/websocket => nhooyr.io/websocket v1.8.17 @@ -354,7 +354,7 @@ require ( code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6 github.com/akrylysov/pogreb v0.0.0-00010101000000-000000000000 github.com/aws/aws-sdk-go v1.55.5 - github.com/felixge/fgprof v0.9.4 + github.com/felixge/fgprof v0.9.5 github.com/fsnotify/fsnotify v1.7.0 github.com/go-redis/redis/v8 v8.0.0-00010101000000-000000000000 github.com/go-sql-driver/mysql v1.8.1 @@ -382,32 +382,32 @@ require ( github.com/stretchr/testify v1.9.0 github.com/zeebo/xxh3 v1.0.2 go.etcd.io/bbolt v1.3.8 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 - go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 + go.opentelemetry.io/otel v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.0.0-00010101000000-000000000000 - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 - go.opentelemetry.io/otel/metric v1.28.0 - go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/sdk/metric v1.28.0 - go.opentelemetry.io/otel/trace v1.28.0 + go.opentelemetry.io/otel/metric v1.29.0 + go.opentelemetry.io/otel/sdk v1.29.0 + go.opentelemetry.io/otel/sdk/metric v1.29.0 + go.opentelemetry.io/otel/trace v1.29.0 go.uber.org/automaxprocs v0.0.0-00010101000000-000000000000 go.uber.org/goleak v1.3.0 go.uber.org/ratelimit v0.3.1 go.uber.org/zap v1.27.0 gocloud.dev v0.0.0-00010101000000-000000000000 - golang.org/x/net v0.28.0 - golang.org/x/oauth2 v0.22.0 + golang.org/x/net v0.29.0 + golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 - golang.org/x/text v0.17.0 + golang.org/x/sys v0.25.0 + golang.org/x/text v0.18.0 golang.org/x/time v0.6.0 - golang.org/x/tools v0.24.0 + golang.org/x/tools v0.25.0 gonum.org/v1/hdf5 v0.0.0-00010101000000-000000000000 gonum.org/v1/plot v0.14.0 - google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/grpc v1.65.0 + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 + google.golang.org/grpc v1.66.0 google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v2 v2.4.0 k8s.io/api 
v0.30.3 @@ -415,17 +415,17 @@ require ( k8s.io/cli-runtime v0.0.0-00010101000000-000000000000 k8s.io/client-go v0.30.3 k8s.io/metrics v0.0.0-00010101000000-000000000000 - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 sigs.k8s.io/controller-runtime v0.0.0-00010101000000-000000000000 sigs.k8s.io/yaml v1.4.0 ) require ( - cloud.google.com/go v0.115.0 // indirect - cloud.google.com/go/auth v0.8.1 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.3 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect cloud.google.com/go/compute/metadata v0.5.0 // indirect - cloud.google.com/go/iam v1.1.13 // indirect + cloud.google.com/go/iam v1.2.0 // indirect filippo.io/edwards25519 v1.1.0 // indirect git.sr.ht/~sbinet/gg v0.5.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect @@ -437,7 +437,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect - github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -447,7 +447,7 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-pdf/fpdf v0.9.0 // indirect github.com/go-toolsmith/astcopy v1.0.2 // indirect @@ -458,18 +458,18 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/pprof v0.0.0-20240903155634-a8630aee4ab9 // indirect github.com/google/s2a-go v0.1.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/wire v0.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect github.com/imdario/mergo v0.3.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -489,7 +489,7 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.55.0 // indirect 
github.com/prometheus/procfs v0.15.1 // indirect @@ -501,20 +501,20 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/xlab/treeprint v1.2.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.26.0 // indirect - golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/crypto v0.27.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/exp/typeparams v0.0.0-20240213143201-ec583247a57a // indirect - golang.org/x/image v0.19.0 // indirect - golang.org/x/mod v0.20.0 // indirect - golang.org/x/term v0.23.0 // indirect + golang.org/x/image v0.20.0 // indirect + golang.org/x/mod v0.21.0 // indirect + golang.org/x/term v0.24.0 // indirect golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/api v0.191.0 // indirect - google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 // indirect + google.golang.org/api v0.196.0 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.30.1 // indirect diff --git a/go.sum b/go.sum index 309a52862a..9c20e890d4 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,23 @@ buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2 h1:SZRVx928rbYZ6hEKUIN+vtGDkl7uotABRWGY4OAg5gM= buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2/go.mod h1:ylS4c28ACSI59oJrOdW4pHS4n0Hw4TgSPHn8rpHl4Yw= -cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/accessapproval v1.7.11/go.mod h1:KGK3+CLDWm4BvjN0wFtZqdFUGhxlTvTF6PhAwQJGL4M= -cloud.google.com/go/accesscontextmanager v1.8.9/go.mod h1:IXvQesVgOC7aXgK9OpYFn5eWnzz8fazegIiJ5WnCOVw= -cloud.google.com/go/accesscontextmanager v1.8.11/go.mod h1:nwPysISS3KR5qXipAU6cW/UbDavDdTBBgPohbkhGSok= +cloud.google.com/go/accessapproval v1.8.0/go.mod h1:ycc7qSIXOrH6gGOGQsuBwpRZw3QhZLi0OWeej3rA5Mg= +cloud.google.com/go/accesscontextmanager v1.8.12/go.mod h1:EmaVYmffq+2jA2waP0/XHECDkaOKVztxVsdzl65t8hw= +cloud.google.com/go/accesscontextmanager v1.9.0/go.mod h1:EmdQRGq5FHLrjGjGTp2X2tlRBvU3LDCUqfnysFYooxQ= cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= -cloud.google.com/go/analytics v0.23.6/go.mod h1:cFz5GwWHrWQi8OHKP9ep3Z4pvHgGcG9lPnFQ+8kXsNo= -cloud.google.com/go/apigateway v1.6.11/go.mod h1:4KsrYHn/kSWx8SNUgizvaz+lBZ4uZfU7mUDsGhmkWfM= -cloud.google.com/go/apigeeconnect v1.6.11/go.mod h1:iMQLTeKxtKL+sb0D+pFlS/TO6za2IUOh/cwMEtn/4g0= -cloud.google.com/go/apigeeregistry v0.8.9/go.mod h1:4XivwtSdfSO16XZdMEQDBCMCWDp3jkCBRhVgamQfLSA= -cloud.google.com/go/appengine v1.8.11/go.mod 
h1:xET3coaDUj+OP4TgnZlgQ+rG2R9fG2nblya13czP56Q= -cloud.google.com/go/area120 v0.8.11/go.mod h1:VBxJejRAJqeuzXQBbh5iHBYUkIjZk5UzFZLCXmzap2o= -cloud.google.com/go/artifactregistry v1.14.13/go.mod h1:zQ/T4xoAFPtcxshl+Q4TJBgsy7APYR/BLd2z3xEAqRA= -cloud.google.com/go/asset v1.19.5/go.mod h1:sqyLOYaLLfc4ACcn3YxqHno+J7lRt9NJTdO50zCUcY0= -cloud.google.com/go/assuredworkloads v1.11.11/go.mod h1:vaYs6+MHqJvLKYgZBOsuuOhBgNNIguhRU0Kt7JTGcnI= +cloud.google.com/go/analytics v0.25.0/go.mod h1:LZMfjJnKU1GDkvJV16dKnXm7KJJaMZfvUXx58ujgVLg= +cloud.google.com/go/apigateway v1.7.0/go.mod h1:miZGNhmrC+SFhxjA7ayjKHk1cA+7vsSINp9K+JxKwZI= +cloud.google.com/go/apigeeconnect v1.7.0/go.mod h1:fd8NFqzu5aXGEUpxiyeCyb4LBLU7B/xIPztfBQi+1zg= +cloud.google.com/go/apigeeregistry v0.9.0/go.mod h1:4S/btGnijdt9LSIZwBDHgtYfYkFGekzNyWkyYTP8Qzs= +cloud.google.com/go/appengine v1.9.0/go.mod h1:y5oI+JT3/6s77QmxbTnLHyiMKz3NPHYOjuhmVi+FyYU= +cloud.google.com/go/area120 v0.9.0/go.mod h1:ujIhRz2gJXutmFYGAUgz3KZ5IRJ6vOwL4CYlNy/jDo4= +cloud.google.com/go/artifactregistry v1.15.0/go.mod h1:4xrfigx32/3N7Pp7YSPOZZGs4VPhyYeRyJ67ZfVdOX4= +cloud.google.com/go/asset v1.20.0/go.mod h1:CT3ME6xNZKsPSvi0lMBPgW3azvRhiurJTFSnNl6ahw8= +cloud.google.com/go/assuredworkloads v1.12.0/go.mod h1:jX84R+0iANggmSbzvVgrGWaqdhRsQihAv4fF7IQ4r7Q= cloud.google.com/go/auth v0.2.1/go.mod h1:khQRBNrvNoHiHhV1iu2x8fSnlNbCaVHilznW5MAI5GY= cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= @@ -26,74 +27,76 @@ cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQS cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= -cloud.google.com/go/auth v0.8.1 h1:QZW9FjC5lZzN864p13YxvAtGUlQ+KgRL+8Sg45Z6vxo= -cloud.google.com/go/auth v0.8.1/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= +cloud.google.com/go/auth v0.9.0/go.mod h1:2HsApZBr9zGZhC9QAXsYVYaWk8kNUt37uny+XVKi7wM= +cloud.google.com/go/auth v0.9.1/go.mod h1:Sw8ocT5mhhXxFklyhT12Eiy0ed6tTrPMCJjSI8KhYLk= +cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/automl v1.13.11/go.mod h1:oMJdXRDOVC+Eq3PnGhhxSut5Hm9TSyVx1aLEOgerOw8= -cloud.google.com/go/baremetalsolution v1.2.10/go.mod h1:eO2c2NMRy5ytcNPhG78KPsWGNsX5W/tUsCOWmYihx6I= -cloud.google.com/go/batch v1.9.2/go.mod h1:smqwS4sleDJVAEzBt/TzFfXLktmWjFNugGDWl8coKX4= -cloud.google.com/go/beyondcorp v1.0.10/go.mod h1:G09WxvxJASbxbrzaJUMVvNsB1ZiaKxpbtkjiFtpDtbo= +cloud.google.com/go/automl v1.14.0/go.mod h1:Kr7rN9ANSjlHyBLGvwhrnt35/vVZy3n/CP4Xmyj0shM= +cloud.google.com/go/baremetalsolution v1.3.0/go.mod h1:E+n44UaDVO5EeSa4SUsDFxQLt6dD1CoE2h+mtxxaJKo= +cloud.google.com/go/batch v1.10.0/go.mod h1:JlktZqyKbcUJWdHOV8juvAiQNH8xXHXTqLp6bD9qreE= +cloud.google.com/go/beyondcorp v1.1.0/go.mod 
h1:F6Rl20QbayaloWIsMhuz+DICcJxckdFKc7R2HCe6iNA= cloud.google.com/go/bigquery v1.62.0/go.mod h1:5ee+ZkF1x/ntgCsFQJAQTM3QkAZOecfCmvxhkJsWRSA= cloud.google.com/go/bigtable v1.18.1/go.mod h1:NAVyfJot9jlo+KmgWLUJ5DJGwNDoChzAcrecLpmuAmY= cloud.google.com/go/bigtable v1.20.0/go.mod h1:upJDn8frsjzpRMfybiWkD1PG6WCCL7CRl26MgVeoXY4= -cloud.google.com/go/bigtable v1.27.2-0.20240802230159-f371928b558f/go.mod h1:avmXcmxVbLJAo9moICRYMgDyTTPoV0MA0lHKnyqV4fQ= -cloud.google.com/go/billing v1.18.9/go.mod h1:bKTnh8MBfCMUT1fzZ936CPN9rZG7ZEiHB2J3SjIjByc= -cloud.google.com/go/binaryauthorization v1.8.7/go.mod h1:cRj4teQhOme5SbWQa96vTDATQdMftdT5324BznxANtg= -cloud.google.com/go/certificatemanager v1.8.5/go.mod h1:r2xINtJ/4xSz85VsqvjY53qdlrdCjyniib9Jp98ZKKM= -cloud.google.com/go/channel v1.17.11/go.mod h1:gjWCDBcTGQce/BSMoe2lAqhlq0dIRiZuktvBKXUawp0= -cloud.google.com/go/cloudbuild v1.16.5/go.mod h1:HXLpZ8QeYZgmDIWpbl9Gs22p6o6uScgQ/cV9HF9cIZU= -cloud.google.com/go/clouddms v1.7.10/go.mod h1:PzHELq0QDyA7VaD9z6mzh2mxeBz4kM6oDe8YxMxd4RA= -cloud.google.com/go/cloudtasks v1.12.12/go.mod h1:8UmM+duMrQpzzRREo0i3x3TrFjsgI/3FQw3664/JblA= -cloud.google.com/go/compute v1.27.5/go.mod h1:DfwDGujFTdSeiE8b8ZqadF/uxHFBz+ekGsk8Zfi9dTA= +cloud.google.com/go/bigtable v1.31.0/go.mod h1:N/mwZO+4TSHOeyiE1JxO+sRPnW4bnR7WLn9AEaiJqew= +cloud.google.com/go/billing v1.19.0/go.mod h1:bGvChbZguyaWRGmu5pQHfFN1VxTDPFmabnCVA/dNdRM= +cloud.google.com/go/binaryauthorization v1.9.0/go.mod h1:fssQuxfI9D6dPPqfvDmObof+ZBKsxA9iSigd8aSA1ik= +cloud.google.com/go/certificatemanager v1.9.0/go.mod h1:hQBpwtKNjUq+er6Rdg675N7lSsNGqMgt7Bt7Dbcm7d0= +cloud.google.com/go/channel v1.18.0/go.mod h1:gQr50HxC/FGvufmqXD631ldL1Ee7CNMU5F4pDyJWlt0= +cloud.google.com/go/cloudbuild v1.17.0/go.mod h1:/RbwgDlbQEwIKoWLIYnW72W3cWs+e83z7nU45xRKnj8= +cloud.google.com/go/clouddms v1.8.0/go.mod h1:JUgTgqd1M9iPa7p3jodjLTuecdkGTcikrg7nz++XB5E= +cloud.google.com/go/cloudtasks v1.13.0/go.mod h1:O1jFRGb1Vm3sN2u/tBdPiVGVTWIsrsbEs3K3N3nNlEU= +cloud.google.com/go/compute v1.28.0/go.mod h1:DEqZBtYrDnD5PvjsKwb3onnhX+qjdCVM7eshj1XdjV4= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M= cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/contactcenterinsights v1.13.6/go.mod h1:mL+DbN3pMQGaAbDC4wZhryLciwSwHf5Tfk4Itr72Zyk= -cloud.google.com/go/container v1.38.0/go.mod h1:U0uPBvkVWOJGY/0qTVuPS7NeafFEUsHSPqT5pB8+fCY= -cloud.google.com/go/containeranalysis v0.12.1/go.mod h1:+/lcJIQSFt45TC0N9Nq7/dPbl0isk6hnC4EvBBqyXsM= +cloud.google.com/go/contactcenterinsights v1.14.0/go.mod h1:APmWYHDN4sASnUBnXs4o68t1EUfnqadA53//CzXZ1xE= +cloud.google.com/go/container v1.39.0/go.mod h1:gNgnvs1cRHXjYxrotVm+0nxDfZkqzBbXCffh5WtqieI= +cloud.google.com/go/containeranalysis v0.13.0/go.mod h1:OpufGxsNzMOZb6w5yqwUgHr5GHivsAD18KEI06yGkQs= cloud.google.com/go/datacatalog v1.20.3/go.mod h1:AKC6vAy5urnMg5eJK3oUjy8oa5zMbiY33h125l8lmlo= -cloud.google.com/go/datacatalog v1.21.0/go.mod h1:DB0QWF9nelpsbB0eR/tA0xbHZZMvpoFD1XFy3Qv/McI= -cloud.google.com/go/dataflow v0.9.11/go.mod h1:CCLufd7I4pPfyp54qMgil/volrL2ZKYjXeYLfQmBGJs= 
-cloud.google.com/go/dataform v0.9.8/go.mod h1:cGJdyVdunN7tkeXHPNosuMzmryx55mp6cInYBgxN3oA= -cloud.google.com/go/datafusion v1.7.11/go.mod h1:aU9zoBHgYmoPp4dzccgm/Gi4xWDMXodSZlNZ4WNeptw= -cloud.google.com/go/datalabeling v0.8.11/go.mod h1:6IGUV3z7hlkAU5ndKVshv/8z+7pxE+k0qXsEjyzO1Xg= -cloud.google.com/go/dataplex v1.18.2/go.mod h1:NuBpJJMGGQn2xctX+foHEDKRbizwuiHJamKvvSteY3Q= -cloud.google.com/go/dataproc/v2 v2.5.3/go.mod h1:RgA5QR7v++3xfP7DlgY3DUmoDSTaaemPe0ayKrQfyeg= -cloud.google.com/go/dataqna v0.8.11/go.mod h1:74Icl1oFKKZXPd+W7YDtqJLa+VwLV6wZ+UF+sHo2QZQ= -cloud.google.com/go/datastore v1.17.1/go.mod h1:mtzZ2HcVtz90OVrEXXGDc2pO4NM1kiBQy8YV4qGe0ZM= -cloud.google.com/go/datastream v1.10.10/go.mod h1:NqchuNjhPlISvWbk426/AU/S+Kgv7srlID9P5XOAbtg= -cloud.google.com/go/deploy v1.21.0/go.mod h1:PaOfS47VrvmYnxG5vhHg0KU60cKeWcqyLbMBjxS8DW8= -cloud.google.com/go/dialogflow v1.55.0/go.mod h1:0u0hSlJiFpMkMpMNoFrQETwDjaRm8Q8hYKv+jz5JeRA= -cloud.google.com/go/dlp v1.16.0/go.mod h1:LtPZxZAenBXKzvWIOB2hdHIXuEcK0wW0En8//u+/nNA= -cloud.google.com/go/documentai v1.31.0/go.mod h1:5ajlDvaPyl9tc+K/jZE8WtYIqSXqAD33Z1YAYIjfad4= -cloud.google.com/go/domains v0.9.11/go.mod h1:efo5552kUyxsXEz30+RaoIS2lR7tp3M/rhiYtKXkhkk= -cloud.google.com/go/edgecontainer v1.2.5/go.mod h1:OAb6tElD3F3oBujFAup14PKOs9B/lYobTb6LARmoACY= +cloud.google.com/go/datacatalog v1.22.0/go.mod h1:4Wff6GphTY6guF5WphrD76jOdfBiflDiRGFAxq7t//I= +cloud.google.com/go/dataflow v0.10.0/go.mod h1:zAv3YUNe/2pXWKDSPvbf31mCIUuJa+IHtKmhfzaeGww= +cloud.google.com/go/dataform v0.10.0/go.mod h1:0NKefI6v1ppBEDnwrp6gOMEA3s/RH3ypLUM0+YWqh6A= +cloud.google.com/go/datafusion v1.8.0/go.mod h1:zHZ5dJYHhMP1P8SZDZm+6yRY9BCCcfm7Xg7YmP+iA6E= +cloud.google.com/go/datalabeling v0.9.0/go.mod h1:GVX4sW4cY5OPKu/9v6dv20AU9xmGr4DXR6K26qN0mzw= +cloud.google.com/go/dataplex v1.19.0/go.mod h1:5H9ftGuZWMtoEIUpTdGUtGgje36YGmtRXoC8wx6QSUc= +cloud.google.com/go/dataproc/v2 v2.6.0/go.mod h1:amsKInI+TU4GcXnz+gmmApYbiYM4Fw051SIMDoWCWeE= +cloud.google.com/go/dataqna v0.9.0/go.mod h1:WlRhvLLZv7TfpONlb/rEQx5Qrr7b5sxgSuz5NP6amrw= +cloud.google.com/go/datastore v1.19.0/go.mod h1:KGzkszuj87VT8tJe67GuB+qLolfsOt6bZq/KFuWaahc= +cloud.google.com/go/datastream v1.11.0/go.mod h1:vio/5TQ0qNtGcIj7sFb0gucFoqZW19gZ7HztYtkzq9g= +cloud.google.com/go/deploy v1.22.0/go.mod h1:qXJgBcnyetoOe+w/79sCC99c5PpHJsgUXCNhwMjG0e4= +cloud.google.com/go/dialogflow v1.57.0/go.mod h1:wegtnocuYEfue6IGlX96n5mHu3JGZUaZxv1L5HzJUJY= +cloud.google.com/go/dlp v1.18.0/go.mod h1:RVO9zkh+xXgUa7+YOf9IFNHL/2FXt9Vnv/GKNYmc1fE= +cloud.google.com/go/documentai v1.33.0/go.mod h1:lI9Mti9COZ5qVjdpfDZxNjOrTVf6tJ//vaqbtt81214= +cloud.google.com/go/domains v0.10.0/go.mod h1:VpPXnkCNRsxkieDFDfjBIrLv3p1kRjJ03wLoPeL30To= +cloud.google.com/go/edgecontainer v1.3.0/go.mod h1:dV1qTl2KAnQOYG+7plYr53KSq/37aga5/xPgOlYXh3A= cloud.google.com/go/errorreporting v0.3.1/go.mod h1:6xVQXU1UuntfAf+bVkFk6nld41+CPyF2NSPCyXE3Ztk= -cloud.google.com/go/essentialcontacts v1.6.12/go.mod h1:UGhWTIYewH8Ma4wDRJp8cMAHUCeAOCKsuwd6GLmmQLc= -cloud.google.com/go/eventarc v1.13.10/go.mod h1:KlCcOMApmUaqOEZUpZRVH+p0nnnsY1HaJB26U4X5KXE= -cloud.google.com/go/filestore v1.8.7/go.mod h1:dKfyH0YdPAKdYHqAR/bxZeil85Y5QmrEVQwIYuRjcXI= +cloud.google.com/go/essentialcontacts v1.7.0/go.mod h1:0JEcNuyjyg43H/RJynZzv2eo6MkmnvRPUouBpOh6akY= +cloud.google.com/go/eventarc v1.14.0/go.mod h1:60ZzZfOekvsc/keHc7uGHcoEOMVa+p+ZgRmTjpdamnA= +cloud.google.com/go/filestore v1.9.0/go.mod h1:GlQK+VBaAGb19HqprnOMqYYpn7Gev5ZA9SSHpxFKD7Q= cloud.google.com/go/firestore v1.16.0/go.mod 
h1:+22v/7p+WNBSQwdSwP57vz47aZiY+HrDkrOsJNhk7rg= -cloud.google.com/go/functions v1.16.6/go.mod h1:wOzZakhMueNQaBUJdf0yjsJIe0GBRu+ZTvdSTzqHLs0= -cloud.google.com/go/gkebackup v1.5.4/go.mod h1:V+llvHlRD0bCyrkYaAMJX+CHralceQcaOWjNQs8/Ymw= -cloud.google.com/go/gkeconnect v0.8.11/go.mod h1:ejHv5ehbceIglu1GsMwlH0nZpTftjxEY6DX7tvaM8gA= -cloud.google.com/go/gkehub v0.14.11/go.mod h1:CsmDJ4qbBnSPkoBltEubK6qGOjG0xNfeeT5jI5gCnRQ= -cloud.google.com/go/gkemulticloud v1.2.4/go.mod h1:PjTtoKLQpIRztrL+eKQw8030/S4c7rx/WvHydDJlpGE= -cloud.google.com/go/grafeas v0.3.6/go.mod h1:to6ECAPgRO2xeqD8ISXHc70nObJuaKZThreQOjeOH3o= -cloud.google.com/go/gsuiteaddons v1.6.11/go.mod h1:U7mk5PLBzDpHhgHv5aJkuvLp9RQzZFpa8hgWAB+xVIk= -cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= -cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= -cloud.google.com/go/iap v1.9.10/go.mod h1:pO0FEirrhMOT1H0WVwpD5dD9r3oBhvsunyBQtNXzzc0= -cloud.google.com/go/ids v1.4.11/go.mod h1:+ZKqWELpJm8WcRRsSvKZWUdkriu4A3XsLLzToTv3418= -cloud.google.com/go/iot v1.7.11/go.mod h1:0vZJOqFy9kVLbUXwTP95e0dWHakfR4u5IWqsKMGIfHk= -cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY= -cloud.google.com/go/language v1.13.0/go.mod h1:B9FbD17g1EkilctNGUDAdSrBHiFOlKNErLljO7jplDU= -cloud.google.com/go/lifesciences v0.9.11/go.mod h1:NMxu++FYdv55TxOBEvLIhiAvah8acQwXsz79i9l9/RY= +cloud.google.com/go/functions v1.19.0/go.mod h1:WDreEDZoUVoOkXKDejFWGnprrGYn2cY2KHx73UQERC0= +cloud.google.com/go/gkebackup v1.6.0/go.mod h1:1rskt7NgawoMDHTdLASX8caXXYG3MvDsoZ7qF4RMamQ= +cloud.google.com/go/gkeconnect v0.11.0/go.mod h1:l3iPZl1OfT+DUQ+QkmH1PC5RTLqxKQSVnboLiQGAcCA= +cloud.google.com/go/gkehub v0.15.0/go.mod h1:obpeROly2mjxZJbRkFfHEflcH54XhJI+g2QgfHphL0I= +cloud.google.com/go/gkemulticloud v1.3.0/go.mod h1:XmcOUQ+hJI62fi/klCjEGs6lhQ56Zjs14sGPXsGP0mE= +cloud.google.com/go/grafeas v0.3.10/go.mod h1:Mz/AoXmxNhj74VW0fz5Idc3kMN2VZMi4UT5+UPx5Pq0= +cloud.google.com/go/gsuiteaddons v1.7.0/go.mod h1:/B1L8ANPbiSvxCgdSwqH9CqHIJBzTt6v50fPr3vJCtg= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/iap v1.10.0/go.mod h1:gDT6LZnKnWNCaov/iQbj7NMUpknFDOkhhlH8PwIrpzU= +cloud.google.com/go/ids v1.5.0/go.mod h1:4NOlC1m9hAJL50j2cRV4PS/J6x/f4BBM0Xg54JQLCWw= +cloud.google.com/go/iot v1.8.0/go.mod h1:/NMFENPnQ2t1UByUC1qFvA80fo1KFB920BlyUPn1m3s= +cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM= +cloud.google.com/go/language v1.14.0/go.mod h1:ldEdlZOFwZREnn/1yWtXdNzfD7hHi9rf87YDkOY9at4= +cloud.google.com/go/lifesciences v0.10.0/go.mod h1:1zMhgXQ7LbMbA5n4AYguFgbulbounfUoYvkV8dtsLcA= cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= @@ -102,68 +105,68 @@ cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUz cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA= cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c= -cloud.google.com/go/longrunning v0.5.10/go.mod h1:tljz5guTr5oc/qhlUjBlk7UAIFMOGuPNxkNDZXlLics= 
cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= -cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= -cloud.google.com/go/managedidentities v1.6.11/go.mod h1:df+8oZ1D4Eri+NrcpuiR5Hd6MGgiMqn0ZCzNmBYPS0A= -cloud.google.com/go/maps v1.11.6/go.mod h1:MOS/NN0L6b7Kumr8bLux9XTpd8+D54DYxBMUjq+XfXs= -cloud.google.com/go/mediatranslation v0.8.11/go.mod h1:3sNEm0fx61eHk7rfzBzrljVV9XKr931xI3OFacQBVFg= -cloud.google.com/go/memcache v1.10.11/go.mod h1:ubJ7Gfz/xQawQY5WO5pht4Q0dhzXBFeEszAeEJnwBHU= -cloud.google.com/go/metastore v1.13.10/go.mod h1:RPhMnBxUmTLT1fN7fNbPqtH5EoGHueDxubmJ1R1yT84= -cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= -cloud.google.com/go/networkconnectivity v1.14.10/go.mod h1:f7ZbGl4CV08DDb7lw+NmMXQTKKjMhgCEEwFbEukWuOY= -cloud.google.com/go/networkmanagement v1.13.6/go.mod h1:WXBijOnX90IFb6sberjnGrVtZbgDNcPDUYOlGXmG8+4= -cloud.google.com/go/networksecurity v0.9.11/go.mod h1:4xbpOqCwplmFgymAjPFM6ZIplVC6+eQ4m7sIiEq9oJA= -cloud.google.com/go/notebooks v1.11.9/go.mod h1:JmnRX0eLgHRJiyxw8HOgumW9iRajImZxr7r75U16uXw= -cloud.google.com/go/optimization v1.6.9/go.mod h1:mcvkDy0p4s5k7iSaiKrwwpN0IkteHhGmuW5rP9nXA5M= -cloud.google.com/go/orchestration v1.9.6/go.mod h1:gQvdIsHESZJigimnbUA8XLbYeFlSg/z+A7ppds5JULg= -cloud.google.com/go/orgpolicy v1.12.5/go.mod h1:f778/jOHKp6cP6NbbQgjy4SDfQf6BoVGiSWdxky3ONQ= -cloud.google.com/go/orgpolicy v1.12.7/go.mod h1:Os3GlUFRPf1UxOHTup5b70BARnhHeQNNVNZzJXPbWYI= -cloud.google.com/go/osconfig v1.13.0/go.mod h1:tlACnQi1rtSLnHRYzfw9SH9zXs0M7S1jqiW2EOCn2Y0= -cloud.google.com/go/osconfig v1.13.2/go.mod h1:eupylkWQJCwSIEMkpVR4LqpgKkQi0mD4m1DzNCgpQso= -cloud.google.com/go/oslogin v1.13.7/go.mod h1:xq027cL0fojpcEcpEQdWayiDn8tIx3WEFYMM6+q7U+E= -cloud.google.com/go/phishingprotection v0.8.11/go.mod h1:Mge0cylqVFs+D0EyxlsTOJ1Guf3qDgrztHzxZqkhRQM= -cloud.google.com/go/policytroubleshooter v1.10.9/go.mod h1:X8HEPVBWz8E+qwI/QXnhBLahEHdcuPO3M9YvSj0LDek= -cloud.google.com/go/privatecatalog v0.9.11/go.mod h1:awEF2a8M6UgoqVJcF/MthkF8SSo6OoWQ7TtPNxUlljY= -cloud.google.com/go/pubsub v1.41.0/go.mod h1:g+YzC6w/3N91tzG66e2BZtp7WrpBBMXVa3Y9zVoOGpk= +cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/managedidentities v1.7.0/go.mod h1:o4LqQkQvJ9Pt7Q8CyZV39HrzCfzyX8zBzm8KIhRw91E= +cloud.google.com/go/maps v1.12.0/go.mod h1:qjErDNStn3BaGx06vHner5d75MRMgGflbgCuWTuslMc= +cloud.google.com/go/mediatranslation v0.9.0/go.mod h1:udnxo0i4YJ5mZfkwvvQQrQ6ra47vcX8jeGV+6I5x+iU= +cloud.google.com/go/memcache v1.11.0/go.mod h1:99MVF02m5TByT1NKxsoKDnw5kYmMrjbGSeikdyfCYZk= +cloud.google.com/go/metastore v1.14.0/go.mod h1:vtPt5oVF/+ocXO4rv4GUzC8Si5s8gfmo5OIt6bACDuE= +cloud.google.com/go/monitoring v1.21.0/go.mod h1:tuJ+KNDdJbetSsbSGTqnaBvbauS5kr3Q/koy3Up6r+4= +cloud.google.com/go/networkconnectivity v1.15.0/go.mod h1:uBQqx/YHI6gzqfV5J/7fkKwTGlXvQhHevUuzMpos9WY= +cloud.google.com/go/networkmanagement v1.14.0/go.mod h1:4myfd4A0uULCOCGHL1npZN0U+kr1Z2ENlbHdCCX4cE8= +cloud.google.com/go/networksecurity v0.10.0/go.mod h1:IcpI5pyzlZyYG8cNRCJmY1AYKajsd9Uz575HoeyYoII= +cloud.google.com/go/notebooks v1.12.0/go.mod h1:euIZBbGY6G0J+UHzQ0XflysP0YoAUnDPZU7Fq0KXNw8= +cloud.google.com/go/optimization v1.7.0/go.mod 
h1:6KvAB1HtlsMMblT/lsQRIlLjUhKjmMWNqV1AJUctbWs= +cloud.google.com/go/orchestration v1.10.0/go.mod h1:pGiFgTTU6c/nXHTPpfsGT8N4Dax8awccCe6kjhVdWjI= +cloud.google.com/go/orgpolicy v1.12.8/go.mod h1:WHkLGqHILPnMgJ4UTdag6YgztVIgWS+T5T6tywH3cSM= +cloud.google.com/go/orgpolicy v1.13.0/go.mod h1:oKtT56zEFSsYORUunkN2mWVQBc9WGP7yBAPOZW1XCXc= +cloud.google.com/go/osconfig v1.13.3/go.mod h1:gIFyyriC1ANob8SnpwrQ6jjNroRwItoBOYfqiG3LkUU= +cloud.google.com/go/osconfig v1.14.0/go.mod h1:GhZzWYVrnQ42r+K5pA/hJCsnWVW2lB6bmVg+GnZ6JkM= +cloud.google.com/go/oslogin v1.14.0/go.mod h1:VtMzdQPRP3T+w5OSFiYhaT/xOm7H1wo1HZUD2NAoVK4= +cloud.google.com/go/phishingprotection v0.9.0/go.mod h1:CzttceTk9UskH9a8BycYmHL64zakEt3EXaM53r4i0Iw= +cloud.google.com/go/policytroubleshooter v1.11.0/go.mod h1:yTqY8n60lPLdU5bRbImn9IazrmF1o5b0VBshVxPzblQ= +cloud.google.com/go/privatecatalog v0.10.0/go.mod h1:/Lci3oPTxJpixjiTBoiVv3PmUZg/IdhPvKHcLEgObuc= +cloud.google.com/go/pubsub v1.43.0/go.mod h1:LNLfqItblovg7mHWgU5g84Vhza4J8kTxx0YqIeTzcXY= cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI= -cloud.google.com/go/recaptchaenterprise/v2 v2.14.2/go.mod h1:MwPgdgvBkE46aWuuXeBTCB8hQJ88p+CpXInROZYCTkc= -cloud.google.com/go/recommendationengine v0.8.11/go.mod h1:cEkU4tCXAF88a4boMFZym7U7uyxvVwcQtKzS85IbQio= -cloud.google.com/go/recommender v1.12.7/go.mod h1:lG8DVtczLltWuaCv4IVpNphONZTzaCC9KdxLYeZM5G4= -cloud.google.com/go/redis v1.16.4/go.mod h1:unCVfLP5eFrVhGLDnb7IaSaWxuZ+7cBgwwBwbdG9m9w= -cloud.google.com/go/resourcemanager v1.9.11/go.mod h1:SbNAbjVLoi2rt9G74bEYb3aw1iwvyWPOJMnij4SsmHA= -cloud.google.com/go/resourcesettings v1.7.4/go.mod h1:seBdLuyeq+ol2u9G2+74GkSjQaxaBWF+vVb6mVzQFG0= -cloud.google.com/go/retail v1.17.4/go.mod h1:oPkL1FzW7D+v/hX5alYIx52ro2FY/WPAviwR1kZZTMs= -cloud.google.com/go/run v1.4.0/go.mod h1:4G9iHLjdOC+CQ0CzA0+6nLeR6NezVPmlj+GULmb0zE4= -cloud.google.com/go/scheduler v1.10.12/go.mod h1:6DRtOddMWJ001HJ6MS148rtLSh/S2oqd2hQC3n5n9fQ= -cloud.google.com/go/secretmanager v1.13.6/go.mod h1:x2ySyOrqv3WGFRFn2Xk10iHmNmvmcEVSSqc30eb1bhw= -cloud.google.com/go/security v1.17.4/go.mod h1:KMuDJH+sEB3KTODd/tLJ7kZK+u2PQt+Cfu0oAxzIhgo= -cloud.google.com/go/securitycenter v1.33.1/go.mod h1:jeFisdYUWHr+ig72T4g0dnNCFhRwgwGoQV6GFuEwafw= -cloud.google.com/go/servicedirectory v1.11.11/go.mod h1:pnynaftaj9LmRLIc6t3r7r7rdCZZKKxui/HaF/RqYfs= -cloud.google.com/go/shell v1.7.11/go.mod h1:SywZHWac7onifaT9m9MmegYp3GgCLm+tgk+w2lXK8vg= -cloud.google.com/go/spanner v1.65.0/go.mod h1:dQGB+w5a67gtyE3qSKPPxzniedrnAmV6tewQeBY7Hxs= -cloud.google.com/go/speech v1.24.0/go.mod h1:HcVyIh5jRXM5zDMcbFCW+DF2uK/MSGN6Rastt6bj1ic= +cloud.google.com/go/recaptchaenterprise/v2 v2.17.0/go.mod h1:SS4QDdlmJ3NvbOMCXQxaFhVGRjvNMfoKCoCdxqXadqs= +cloud.google.com/go/recommendationengine v0.9.0/go.mod h1:59ydKXFyXO4Y8S0Bk224sKfj6YvIyzgcpG6w8kXIMm4= +cloud.google.com/go/recommender v1.13.0/go.mod h1:+XkXkeB9k6zG222ZH70U6DBkmvEL0na+pSjZRmlWcrk= +cloud.google.com/go/redis v1.17.0/go.mod h1:pzTdaIhriMLiXu8nn2CgiS52SYko0tO1Du4d3MPOG5I= +cloud.google.com/go/resourcemanager v1.10.0/go.mod h1:kIx3TWDCjLnUQUdjQ/e8EXsS9GJEzvcY+YMOHpADxrk= +cloud.google.com/go/resourcesettings v1.8.0/go.mod h1:/hleuSOq8E6mF1sRYZrSzib8BxFHprQXrPluWTuZ6Ys= +cloud.google.com/go/retail v1.18.0/go.mod h1:vaCabihbSrq88mKGKcKc4/FDHvVcPP0sQDAt0INM+v8= +cloud.google.com/go/run v1.5.0/go.mod h1:Z4Tv/XNC/veO6rEpF0waVhR7vEu5RN1uJQ8dD1PeMtI= +cloud.google.com/go/scheduler v1.11.0/go.mod h1:RBSu5/rIsF5mDbQUiruvIE6FnfKpLd3HlTDu8aWk0jw= +cloud.google.com/go/secretmanager 
v1.14.0/go.mod h1:q0hSFHzoW7eRgyYFH8trqEFavgrMeiJI4FETNN78vhM= +cloud.google.com/go/security v1.18.0/go.mod h1:oS/kRVUNmkwEqzCgSmK2EaGd8SbDUvliEiADjSb/8Mo= +cloud.google.com/go/securitycenter v1.35.0/go.mod h1:gotw8mBfCxX0CGrRK917CP/l+Z+QoDchJ9HDpSR8eDc= +cloud.google.com/go/servicedirectory v1.12.0/go.mod h1:lKKBoVStJa+8S+iH7h/YRBMUkkqFjfPirkOTEyYAIUk= +cloud.google.com/go/shell v1.8.0/go.mod h1:EoQR8uXuEWHUAMoB4+ijXqRVYatDCdKYOLAaay1R/yw= +cloud.google.com/go/spanner v1.67.0/go.mod h1:Um+TNmxfcCHqNCKid4rmAMvoe/Iu1vdz6UfxJ9GPxRQ= +cloud.google.com/go/speech v1.25.0/go.mod h1:2IUTYClcJhqPgee5Ko+qJqq29/bglVizgIap0c5MvYs= cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -cloud.google.com/go/storagetransfer v1.10.10/go.mod h1:8+nX+WgQ2ZJJnK8e+RbK/zCXk8T7HdwyQAJeY7cEcm0= -cloud.google.com/go/talent v1.6.12/go.mod h1:nT9kNVuJhZX2QgqKZS6t6eCWZs5XEBYRBv6bIMnPmo4= -cloud.google.com/go/texttospeech v1.7.11/go.mod h1:Ua125HU+WT2IkIo5MzQtuNpNEk72soShJQVdorZ1SAE= -cloud.google.com/go/tpu v1.6.11/go.mod h1:W0C4xaSj1Ay3VX/H96FRvLt2HDs0CgdRPVI4e7PoCDk= -cloud.google.com/go/trace v1.10.12/go.mod h1:tYkAIta/gxgbBZ/PIzFxSH5blajgX4D00RpQqCG/GZs= +cloud.google.com/go/storagetransfer v1.11.0/go.mod h1:arcvgzVC4HPcSikqV8D4h4PwrvGQHfKtbL4OwKPirjs= +cloud.google.com/go/talent v1.7.0/go.mod h1:8zfRPWWV4GNZuUmBwQub0gWAe2KaKhsthyGtV8fV1bY= +cloud.google.com/go/texttospeech v1.8.0/go.mod h1:hAgeA01K5QNfLy2sPUAVETE0L4WdEpaCMfwKH1qjCQU= +cloud.google.com/go/tpu v1.7.0/go.mod h1:/J6Co458YHMD60nM3cCjA0msvFU/miCGMfx/nYyxv/o= +cloud.google.com/go/trace v1.11.0/go.mod h1:Aiemdi52635dBR7o3zuc9lLjXo3BwGaChEjCa3tJNmM= cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs= -cloud.google.com/go/translate v1.10.7/go.mod h1:mH/+8tvcItuy1cOWqU+/Y3iFHgkVUObNIQYI/kiFFiY= -cloud.google.com/go/video v1.22.0/go.mod h1:CxPshUNAb1ucnzbtruEHlAal9XY+SPG2cFqC/woJzII= -cloud.google.com/go/videointelligence v1.11.11/go.mod h1:dab2Ca3AXT6vNJmt3/6ieuquYRckpsActDekLcsd6dU= -cloud.google.com/go/vision/v2 v2.8.6/go.mod h1:G3v0uovxCye3u369JfrHGY43H6u/IQ08x9dw5aVH8yY= -cloud.google.com/go/vmmigration v1.7.11/go.mod h1:PmD1fDB0TEHGQR1tDZt9GEXFB9mnKKalLcTVRJKzcQA= -cloud.google.com/go/vmwareengine v1.2.0/go.mod h1:rPjCHu6hG9N8d6PhkoDWFkqL9xpbFY+ueVW+0pNFbZg= -cloud.google.com/go/vpcaccess v1.7.11/go.mod h1:a2cuAiSCI4TVK0Dt6/dRjf22qQvfY+podxst2VvAkcI= -cloud.google.com/go/webrisk v1.9.11/go.mod h1:mK6M8KEO0ZI7VkrjCq3Tjzw4vYq+3c4DzlMUDVaiswE= -cloud.google.com/go/websecurityscanner v1.6.11/go.mod h1:vhAZjksELSg58EZfUQ1BMExD+hxqpn0G0DuyCZQjiTg= -cloud.google.com/go/workflows v1.12.10/go.mod h1:RcKqCiOmKs8wFUEf3EwWZPH5eHc7Oq0kamIyOUCk0IE= -code.cloudfoundry.org/bytefmt v0.1.0 h1:NmVhaUPBO59QQpt5vwYW8crDUksCnvTCQi+Q6uOeLwM= -code.cloudfoundry.org/bytefmt v0.1.0/go.mod h1:eF2ZbltNI7Pv+8Cuyeksu9up5FN5konuH0trDJBuscw= +cloud.google.com/go/translate v1.12.0/go.mod h1:4/C4shFIY5hSZ3b3g+xXWM5xhBLqcUqksSMrQ7tyFtc= +cloud.google.com/go/video v1.23.0/go.mod h1:EGLQv3Ce/VNqcl/+Amq7jlrnpg+KMgQcr6YOOBfE9oc= +cloud.google.com/go/videointelligence v1.12.0/go.mod h1:3rjmafNpCEqAb1CElGTA7dsg8dFDsx7RQNHS7o088D0= +cloud.google.com/go/vision/v2 v2.9.0/go.mod h1:sejxShqNOEucObbGNV5Gk85hPCgiVPP4sWv0GrgKuNw= +cloud.google.com/go/vmmigration v1.8.0/go.mod h1:+AQnGUabjpYKnkfdXJZ5nteUfzNDCmwbj/HSLGPFG5E= +cloud.google.com/go/vmwareengine v1.3.0/go.mod h1:7W/C/YFpelGyZzRUfOYkbgUfbN1CK5ME3++doIkh1Vk= 
+cloud.google.com/go/vpcaccess v1.8.0/go.mod h1:7fz79sxE9DbGm9dbbIdir3tsJhwCxiNAs8aFG8MEhR8= +cloud.google.com/go/webrisk v1.10.0/go.mod h1:ztRr0MCLtksoeSOQCEERZXdzwJGoH+RGYQ2qodGOy2U= +cloud.google.com/go/websecurityscanner v1.7.0/go.mod h1:d5OGdHnbky9MAZ8SGzdWIm3/c9p0r7t+5BerY5JYdZc= +cloud.google.com/go/workflows v1.13.0/go.mod h1:StCuY3jhBj1HYMjCPqZs7J0deQLHPhF6hDtzWJaVF+Y= +code.cloudfoundry.org/bytefmt v0.9.0 h1:PmZlKwcjR2KqjhDCPcWDMyZYaspXCshkeQ+bfHAg+nM= +code.cloudfoundry.org/bytefmt v0.9.0/go.mod h1:WEpQg+9NS7k9tV66pocDKhliJWLTXwB8C5rL5isF8nc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20221208032759-85de2813cf6b/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= eliasnaur.com/font v0.0.0-20230308162249-dd43949cb42d/go.mod h1:OYVuxibdk9OSLX8vAqydtRPP87PyTFcT9uH3MlEGBQA= @@ -194,8 +197,8 @@ github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9/go.mod h1:7uhhqiBaR4CpN0k9rMjOtjpcfGd6DG2m04zQxKnWQ0I= -github.com/ajstarks/deck v0.0.0-20240814155529-0478e0c25be8/go.mod h1:5o5HzZ3nUiOivE0SPQepE7oNquDd+9yip0PtlFpq888= -github.com/ajstarks/deck/generate v0.0.0-20240814155529-0478e0c25be8/go.mod h1:al/X+Mdfx3esXeGnnIVn5aYB6SfwTu+9T0u4EXmKJuk= +github.com/ajstarks/deck v0.0.0-20240828115917-88fc45aa28b1/go.mod h1:5o5HzZ3nUiOivE0SPQepE7oNquDd+9yip0PtlFpq888= +github.com/ajstarks/deck/generate v0.0.0-20240828115917-88fc45aa28b1/go.mod h1:al/X+Mdfx3esXeGnnIVn5aYB6SfwTu+9T0u4EXmKJuk= github.com/ajstarks/fc v0.0.0-20230606144319-ef5d5cb73a3d/go.mod h1:Qp3TfzbBiIjHwDxIpu+g9nYfNw+xXF2Yqp4WmMlTtwM= github.com/ajstarks/openvg v0.0.0-20191008131700-c6885d824eb8/go.mod h1:jpZHIkd4sQEgrzshrUQrRfv5OUMMq0w/Q1yK6ZYhUlk= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw= @@ -218,42 +221,42 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= -github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= +github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod 
h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11 h1:FEDZD/Axt5tKSkPAs967KZ++MkvYdBqr0a+cetRbjLM= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11/go.mod h1:dvlsbA32KfvCzqwTiX7maABgFek2RyUuYEJ3kyn/PmQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= +github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= +github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= +github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18 h1:9DIp7vhmOPmueCDwpXa45bEbLHHTt1kcxChdTJWWxvI= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.18/go.mod h1:aJv/Fwz8r56ozwYFRC4bzoeL1L17GYQYemfblOBux1M= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 h1:GckUnpm4EJOAio1c8o25a+b3lVfwVzC9gnSBqiiNmZM= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod 
h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19 h1:FLMkfEiRjhgeDTCjjLoc3URo/TBkgeQbocA78lfkzSI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.19/go.mod h1:Vx+GucNSsdhaxs3aZIKfSUjKVGsxN25nX2SRcdhuw08= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17 h1:u+EfGmksnJc/x5tq3A+OD7LrMbSSR/5TrKLvkdy/fhY= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.17/go.mod h1:VaMx6302JHax2vHJWgRo+5n9zvbacs3bLU/23DNQrTY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2 h1:Kp6PWAlXwP1UvIflkIP6MFZYBNDCa4mFCGtxrpICVOg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.61.2/go.mod h1:5FmD/Dqq57gP+XwaUnd5WFPipAuzrf0HmupX27Gvjvc= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -275,8 +278,8 @@ github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwys github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= @@ -304,8 +307,8 @@ 
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= -github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -350,7 +353,7 @@ github.com/go-pdf/fpdf v1.4.3/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhO github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= @@ -397,14 +400,15 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo= github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI= github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= @@ -413,8 +417,8 @@ github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof v0.0.0-20240903155634-a8630aee4ab9 h1:q5g0N9eal4bmJwXHC5z0QCKs8qhS35hFfq0BAYsIwZI= +github.com/google/pprof v0.0.0-20240903155634-a8630aee4ab9/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -434,8 +438,9 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/ github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.3 h1:QRje2j5GZimBzlbhGA2V2QlGNgL8G6e+wGo/+/2bWI0= +github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= @@ -487,8 +492,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:C github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b h1:4A/j6hb0Sd3VXqhNtgmUlcPy353Qaa0aIfAPcBrI1n8= -github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10-0.20240903161129-13c124496702 h1:q44xybtNinH6elZBfsrq3nMeepSAxGG0vHf2dupILU4= +github.com/klauspost/compress v1.17.10-0.20240903161129-13c124496702/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kpango/fastime v1.1.9 h1:xVQHcqyPt5M69DyFH7g1EPRns1YQNap9d5eLhl/Jy84= @@ -527,8 +532,8 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ github.com/mandolyte/mdtopdf v1.3.2/go.mod h1:c28Ldk+tVc/y7QQcEcILStS/OFlerdXGGdBUzJQBgEo= github.com/mattn/go-colorable 
v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.23 h1:gbShiuAP1W5j9UOksQ06aiiqPMxYecovVGwmTxWtuw0= +github.com/mattn/go-sqlite3 v1.14.23/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= @@ -556,10 +561,10 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= -github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4= +github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= +github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= +github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= @@ -577,12 +582,12 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= +github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= github.com/prometheus/procfs v0.15.1 
h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= @@ -643,32 +648,33 @@ github.com/zeebo/assert v1.3.1 h1:vukIABvugfNMZMQO1ABsyQDJDTVQbn+LWSMy1ol1h6A= github.com/zeebo/assert v1.3.1/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.einride.tech/aip v0.68.0/go.mod h1:7y9FF8VtPWqpxuAxl0KQWqaULxW4zFIesD6zF5RIHHg= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod 
h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0 h1:k6fQVDQexDE+3jG2SfCQjnHS7OamcP73YMoxEVq5B6k= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.29.0/go.mod h1:t4BrYLHU450Zo9fnydWlIuswB1bm7rM8havDpWOJeDo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.starlark.net v0.0.0-20240725214946-42030a7cedce h1:YyGqCjZtGZJ+mRPaenEiB87afEO2MFRzLiJNZ0Z0bPw= @@ -687,41 +693,41 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= gocloud.dev v0.39.0 h1:EYABYGhAalPUaMrbSKOr5lejxoxvXj99nE8XFtsDgds= gocloud.dev v0.39.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= +golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/exp/shiny v0.0.0-20220827204233-334a2380cb91/go.mod 
h1:VjAR7z0ngyATZTELrBSkxOOHhhlnVUxDye4mcjx5h/8= golang.org/x/exp/shiny v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= golang.org/x/exp/shiny v0.0.0-20240707233637-46b078467d37/go.mod h1:3F+MieQB7dRYLTmnncoFbb1crS5lfQoTfDgQy6K4N0o= -golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa h1:54T+HVkPu4D3lltpEHyI3Fs2pG/GqjGkXLgyKOmifXk= -golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.19.0 h1:D9FX4QWkLfkeqaC62SonffIIuYdOk/UE2XKUBgRIBIQ= -golang.org/x/image v0.19.0/go.mod h1:y0zrRqlQRWQ5PXaYCOMLTW2fpsxZ8Qh9I/ohnInJEys= +golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0 h1:bVwtbF629Xlyxk6xLQq2TDYmqP0uiWaet5LwRebuY0k= +golang.org/x/exp/typeparams v0.0.0-20240909161429-701f63a606c0/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.20.0 h1:7cVCUjQwfL18gyBJOmYvptfSHS8Fb3YUDtfLIZ7Nbpw= +golang.org/x/image v0.20.0/go.mod h1:0a88To4CYVBAHp5FXJm8o7QbUl37Vd85ply1vyD8auM= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20240806205939-81131f6468ab/go.mod h1:udWezQGYjqrCxz5nV321pXQTx5oGbZx+khZvFjZNOPM= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/mobile v0.0.0-20240909163608-642950227fb3/go.mod h1:5EJr05J3jS1A5hwVNxs4vC0pIRxtWmwM15D1ZxCj93s= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= +golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/text v0.18.0 
h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= +golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= @@ -730,18 +736,18 @@ gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 h1:vJpL69PeUullhJyKtTjHjENE gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946/go.mod h1:BQUWDHIAygjdt1HnUPQ0eWqLN2n5FwJycrpYUVUOx2I= gonum.org/v1/plot v0.14.0 h1:+LBDVFYwFe4LHhdP8coW6296MBEY4nQ+Y4vuUpJopcE= gonum.org/v1/plot v0.14.0/go.mod h1:MLdR9424SJed+5VqC6MsouEpig9pZX2VZ57H9ko2bXU= -google.golang.org/api v0.192.0 h1:PljqpNAfZaaSpS+TnANfnNAXKdzHM/B9bKhwRlo7JP0= -google.golang.org/api v0.192.0/go.mod h1:9VcphjvAxPKLmSxVSzPlSRXy/5ARMEw5bf58WoVXafQ= +google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= +google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:5/MT647Cn/GGhwTpXC7QqcaR5Cnee4v4MKCU1/nwnIQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api 
v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:q0eWNnCW04EJlyrmLT+ZHsjuoUiZ36/eAEdCCezZoco= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.1 h1:hO5qAXR19+/Z44hmvIM4dQFMSYX9XcWsByfoxutBpAM= +google.golang.org/grpc v1.66.1/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -769,12 +775,12 @@ k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 h1:/amS69DLm09mtbFtN3+LyygSFohnYGMseF8iv+2zulg= -k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34/go.mod h1:G0W3eI9gG219NHRq3h5uQaRBl4pj4ZpwzRP5ti8y770= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= k8s.io/metrics v0.30.3 h1:gKCpte5zykrOmQhZ8qmsxyJslMdiLN+sqbBfIWNpbGM= k8s.io/metrics v0.30.3/go.mod h1:W06L2nXRhOwPkFYDJYWdEIS3u6JcJy3ebIPYbndRs6A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= diff --git a/hack/cspell/main.go b/hack/cspell/main.go new file mode 100644 index 0000000000..0595983918 --- /dev/null +++ b/hack/cspell/main.go @@ -0,0 +1,349 @@ +// Copyright (C) 2019-2024 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package main + +import ( + "bufio" + "cmp" + "encoding/json" + "flag" + "fmt" + "os" + "regexp" + "slices" + "strings" + "sync" +) + +type CSpellConfig struct { + Version string `json:"version"` + Language string `json:"language"` + Import []string `json:"import"` + IgnorePaths []string `json:"ignorePaths"` + Patterns []PatternEntry `json:"patterns,omitempty"` + IgnoreRegExpList []string `json:"ignoreRegExpList,omitempty"` + IgnoreWords []string `json:"ignoreWords,omitempty"` + IgnoreWordsMap map[string][]string `json:"ignoreWordsMap,omitempty"` +} + +type PatternEntry struct { + Name string `json:"name"` + Pattern string `json:"pattern"` +} + +type Override struct { + Files []string `json:"files"` + Words []string `json:"words"` +} + +var ( + mandatoryImports = []string{ + "@cspell/dict-cpp/cspell-ext.json", + "@cspell/dict-docker/cspell-ext.json", + "@cspell/dict-en_us/cspell-ext.json", + "@cspell/dict-fullstack/cspell-ext.json", + "@cspell/dict-git/cspell-ext.json", + "@cspell/dict-golang/cspell-ext.json", + "@cspell/dict-k8s/cspell-ext.json", + "@cspell/dict-makefile/cspell-ext.json", + "@cspell/dict-markdown/cspell-ext.json", + "@cspell/dict-npm/cspell-ext.json", + "@cspell/dict-public-licenses/cspell-ext.json", + "@cspell/dict-rust/cspell-ext.json", + "@cspell/dict-shell/cspell-ext.json", + } + + mandatoryIgnorePaths = []string{ + "**/*.ai", + "**/*.drawio", + "**/*.hdf5", + "**/*.key", + "**/*.lock", + "**/*.log", + "**/*.md5", + "**/*.pack", + "**/*.pdf", + "**/*.pem", + "**/*.png", + "**/*.sum", + "**/*.svg", + "**/.cspell.json", + "**/.git/objects/**", + "**/cmd/agent/core/faiss/faiss", + "**/cmd/agent/core/ngt/ngt", + "**/cmd/agent/sidecar/sidecar", + "**/cmd/discoverer/k8s/discoverer", + "**/cmd/gateway/filter/filter", + "**/cmd/gateway/lb/lb", + "**/cmd/gateway/mirror/mirror", + "**/cmd/index/job/correction/index-correction", + "**/cmd/index/job/creation/index-creation", + "**/cmd/index/job/readreplica/rotate/readreplica-rotate", + "**/cmd/index/job/save/index-save", + "**/cmd/index/operator/index-operator", + "**/cmd/manager/index/index", + "**/cmd/tools/benchmark/job/job", + "**/cmd/tools/benchmark/operator/operator", + "**/cmd/tools/cli/loadtest/loadtest", + "**/hack/cspell/**", + "**/internal/core/algorithm/ngt/assets/index", + "**/internal/test/data/agent/ngt/validIndex", + } + suffixes = []string{ + "addr", + "addrs", + "buf", + "cancel", + "cfg", + "ch", + "cnt", + "conf", + "conn", + "ctx", + "dim", + "dur", + "env", + "err", + "error", + "errors", + "errs", + "idx", + "len", + "mu", + "opt", + "opts", + "pool", + "req", + "res", + "size", + "vec", + } + + sufReg = regexp.MustCompile(fmt.Sprintf("(%s)$", strings.Join(suffixes, "|"))) + + prexp = regexp.MustCompile(`Unknown word \((.*?)\) Suggestions`) +) + +func extractLine(line string) (filePath, word string, ok bool) { + filePath, line, ok = strings.Cut(line, ":") + if !ok || len(filePath) == 0 { + return "", "", false + } + _, s, ok := strings.Cut(line, " - ") + if ok { + line = s + } + matches := prexp.FindStringSubmatch(line) + if len(matches) > 1 { + return filePath, matches[1], true + } + return "", "", false +} + +func parseCspellResult(filePath string, th int) (map[string][]string, map[string]bool, error) { + file, err := os.Open(filePath) + if err != nil { + return nil, nil, fmt.Errorf("could not open file: %w", err) + } + defer file.Close() + + var ( + wg sync.WaitGroup + mu sync.Mutex + ) + wordsByFile := make(map[string][]string) + filesByWord := make(map[string][]string) + scanner := 
bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + wg.Add(1) + go func() { + defer wg.Done() + // Extract the unknown word + if path, word, ok := extractLine(line); ok { + if sufReg.MatchString(word) { + return + } + lword := strings.ToLower(word) + mu.Lock() + w, ok := wordsByFile[path] + if !ok || w == nil { + w = make([]string, 0, 2) + } + wordsByFile[path] = append(w, word) + + f, ok := filesByWord[word] + if !ok || f == nil { + f = make([]string, 0, 2) + } + filesByWord[word] = append(f, path) + + if word != lword { + f, ok = filesByWord[lword] + if !ok || f == nil { + f = make([]string, 0, 2) + } + filesByWord[lword] = append(f, path) + } + mu.Unlock() + } + }() + } + + wg.Wait() + + globalWords := make(map[string]bool) + for word, files := range filesByWord { + lword := strings.ToLower(word) + if word != lword { + lfiles, ok := filesByWord[lword] + if ok { + files = append(files, lfiles...) + slices.Sort(files) + files = slices.Compact(files) + if len(files) >= th { + globalWords[lword] = true + globalWords[word] = true + } + } + } else if len(files) >= th { + globalWords[word] = true + } + } + + if err := scanner.Err(); err != nil { + return nil, nil, fmt.Errorf("error reading file: %w", err) + } + + return wordsByFile, globalWords, nil +} + +func loadConfig(path string) (config *CSpellConfig, err error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + config = new(CSpellConfig) + err = json.NewDecoder(file).Decode(config) + if err != nil { + return nil, err + } + return config, nil +} + +func saveConfig(path string, config *CSpellConfig) error { + file, err := os.Create(path) + if err != nil { + return err + } + defer file.Close() + + encoder := json.NewEncoder(file) + encoder.SetIndent("", " ") + return encoder.Encode(config) +} + +func addPatterns(config *CSpellConfig) { + if config.Patterns == nil { + config.Patterns = make([]PatternEntry, 0, len(suffixes)) + } + if config.IgnoreRegExpList == nil { + config.IgnoreRegExpList = make([]string, 0, len(suffixes)) + } + for _, suffix := range suffixes { + pat := fmt.Sprintf("/\\b\\w*%s\\b/", suffix) + name := fmt.Sprintf("Ignore_%s_suffix", suffix) + config.Patterns = append(config.Patterns, PatternEntry{ + Name: name, + Pattern: pat, + }) + config.IgnoreRegExpList = append(config.IgnoreRegExpList, name) + } + slices.SortFunc(config.Patterns, func(left, right PatternEntry) int { + return cmp.Compare(left.Name, right.Name) + }) + config.Patterns = slices.CompactFunc(config.Patterns, func(left, right PatternEntry) bool { + return left.Name == right.Name + }) + slices.Sort(config.IgnoreRegExpList) + config.IgnoreRegExpList = slices.Compact(config.IgnoreRegExpList) +} + +func main() { + configPath := flag.String("config", ".cspell.json", "Path to the existing .cspell.json file") + outputPath := flag.String("output", "", "Path to the cspell output log") + threshold := flag.Int("threshold", 5, "Threshold for declaring global words") + + flag.Parse() + + if *outputPath == "" { + fmt.Println("Error: output path is required") + os.Exit(1) + } + + config, err := loadConfig(*configPath) + if err != nil || config == nil { + config = new(CSpellConfig) + } + config.Import = mandatoryImports + config.IgnorePaths = mandatoryIgnorePaths + config.Version = "0.2" + config.Language = "en" + + addPatterns(config) + + wordsByFile, globalWords, err := parseCspellResult(*outputPath, *threshold) + if err != nil { + fmt.Println("Error:", err) + return + } + + if config.IgnoreWords == nil { + 
config.IgnoreWords = make([]string, 0, len(globalWords)) + } + for word := range globalWords { + config.IgnoreWords = append(config.IgnoreWords, word) + fmt.Println(config.IgnoreWords) + } + slices.Sort(config.IgnoreWords) + for _, word := range config.IgnoreWords { + globalWords[word] = true + } + if config.IgnoreWordsMap == nil { + config.IgnoreWordsMap = make(map[string][]string, len(wordsByFile)) + } + for filePath, words := range wordsByFile { + words = slices.DeleteFunc(words, func(word string) bool { + return globalWords[strings.ToLower(word)] + }) + if len(words) > 0 { + im, ok := config.IgnoreWordsMap[filePath] + if !ok || im == nil { + slices.Sort(words) + config.IgnoreWordsMap[filePath] = words + } else { + words = append(im, words...) + slices.Sort(words) + config.IgnoreWordsMap[filePath] = slices.Compact(words) + } + } + } + + if err := saveConfig(*configPath, config); err != nil { + fmt.Println("Error: output path is required") + os.Exit(1) + } +} diff --git a/hack/cspell/main_test.go b/hack/cspell/main_test.go new file mode 100644 index 0000000000..a11c66b062 --- /dev/null +++ b/hack/cspell/main_test.go @@ -0,0 +1,532 @@ +// Copyright (C) 2019-2024 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package main + +// NOT IMPLEMENTED BELOW +// +// func Test_extractLine(t *testing.T) { +// type args struct { +// line string +// } +// type want struct { +// wantFilePath string +// wantWord string +// wantOk bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, string, string, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotFilePath string, gotWord string, gotOk bool) error { +// if !reflect.DeepEqual(gotFilePath, w.wantFilePath) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotFilePath, w.wantFilePath) +// } +// if !reflect.DeepEqual(gotWord, w.wantWord) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotWord, w.wantWord) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// line:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// line:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, 
goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotFilePath, gotWord, gotOk := extractLine(test.args.line) +// if err := checkFunc(test.want, gotFilePath, gotWord, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_parseCspellResult(t *testing.T) { +// type args struct { +// filePath string +// th int +// } +// type want struct { +// want map[string][]string +// want1 map[string]bool +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, map[string][]string, map[string]bool, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got map[string][]string, got1 map[string]bool, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// if !reflect.DeepEqual(got1, w.want1) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got1, w.want1) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// filePath:"", +// th:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// filePath:"", +// th:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, got1, err := parseCspellResult(test.args.filePath, test.args.th) +// if err := checkFunc(test.want, got, got1, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_loadConfig(t *testing.T) { +// type args struct { +// path string +// } +// type want struct { +// wantConfig *CSpellConfig +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, *CSpellConfig, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotConfig *CSpellConfig, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotConfig, w.wantConfig) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotConfig, w.wantConfig) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// 
args: args { +// path:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// path:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotConfig, err := loadConfig(test.args.path) +// if err := checkFunc(test.want, gotConfig, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_saveConfig(t *testing.T) { +// type args struct { +// path string +// config *CSpellConfig +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// path:"", +// config:CSpellConfig{}, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// path:"", +// config:CSpellConfig{}, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// err := saveConfig(test.args.path, test.args.config) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_addPatterns(t *testing.T) { +// type args struct { +// config *CSpellConfig +// } +// type want struct{} +// type test struct { +// name string +// args args +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// config:CSpellConfig{}, +// }, +// want: 
want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// config:CSpellConfig{}, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// addPatterns(test.args.config) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_main(t *testing.T) { +// type want struct{} +// type test struct { +// name string +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// main() +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/hack/docker/gen/main.go b/hack/docker/gen/main.go index 7750270efc..b26d05950b 100644 --- a/hack/docker/gen/main.go +++ b/hack/docker/gen/main.go @@ -86,8 +86,9 @@ COPY {{$files}} {{- end}} SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 -RUN {{RunMounts .RunMounts}}\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ +RUN {{RunMounts .RunMounts}} \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -223,6 +224,9 @@ const ( organization = "vdaas" repository = "vald" defaultBinaryDir = "/usr/bin" + usrLocal = "/usr/local" + usrLocalBinaryDir = usrLocal + "/bin" + usrLocalLibDir = usrLocal + "/lib" defaultBuilderImage = "ghcr.io/vdaas/vald/vald-buildbase" defaultBuilderTag = "nightly" defaultLanguage = "en_US.UTF-8" @@ -230,7 +234,8 @@ const ( defaultRuntimeImage 
= "gcr.io/distroless/static" defaultRuntimeTag = "nonroot" defaultRuntimeUser = "nonroot:nonroot" - defaultBuildUser = "root:root" + rootUser = "root" + defaultBuildUser = rootUser + ":" + rootUser defaultBuildStageName = "builder" maintainerKey = "MAINTAINER" minimumArgumentLength = 2 @@ -274,28 +279,28 @@ var ( defaultEnvironments = map[string]string{ "DEBIAN_FRONTEND": "noninteractive", - "HOME": "/root", - "USER": "root", + "HOME": "/" + rootUser, + "USER": rootUser, "INITRD": "No", "LANG": defaultLanguage, "LANGUAGE": defaultLanguage, "LC_ALL": defaultLanguage, "ORG": organization, "TZ": "Etc/UTC", - "PATH": "${PATH}:/usr/local/bin", + "PATH": "${PATH}:" + usrLocalBinaryDir, "REPO": repository, } goDefaultEnvironments = map[string]string{ "GOROOT": "/opt/go", "GOPATH": "/go", "GO111MODULE": "on", - "PATH": "${PATH}:${GOROOT}/bin:${GOPATH}/bin:/usr/local/bin", + "PATH": "${PATH}:${GOROOT}/bin:${GOPATH}/bin:" + usrLocalBinaryDir, } rustDefaultEnvironments = map[string]string{ - "RUST_HOME": "/usr/loacl/lib/rust", + "RUST_HOME": usrLocalLibDir + "/rust", "RUSTUP_HOME": "${RUST_HOME}/rustup", "CARGO_HOME": "${RUST_HOME}/cargo", - "PATH": "${PATH}:${RUSTUP_HOME}/bin:${CARGO_HOME}/bin:/usr/local/bin", + "PATH": "${PATH}:${RUSTUP_HOME}/bin:${CARGO_HOME}/bin:" + usrLocalBinaryDir, } clangDefaultEnvironments = map[string]string{ "CC": "gcc", @@ -321,13 +326,13 @@ var ( defaultMounts = []string{ "--mount=type=bind,target=.,rw", "--mount=type=tmpfs,target=/tmp", - "--mount=type=cache,target=/var/lib/apt,sharing=locked", - "--mount=type=cache,target=/var/cache/apt,sharing=locked", + "--mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME}", + "--mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME}", } - goDefaultMounts = []string{ "--mount=type=cache,target=\"${GOPATH}/pkg\",id=\"go-build-${TARGETARCH}\"", "--mount=type=cache,target=\"${HOME}/.cache/go-build\",id=\"go-build-${TARGETARCH}\"", + "--mount=type=tmpfs,target=\"${GOPATH}/src\"", } clangBuildDeps = []string{ @@ -372,7 +377,6 @@ var ( "make kubelinter/install", "make reviewdog/install", "make tparse/install", - "make valdcli/install", "make yq/install", "make minikube/install", "make stern/install", @@ -597,7 +601,7 @@ func main() { "OPERATOR_SDK_VERSION": "latest", }, ExtraCopies: []string{ - "--from=operator /usr/local/bin/${APP_NAME} {{$.BinDir}}/${APP_NAME}", + "--from=operator " + usrLocalBinaryDir + "/${APP_NAME} {{$.BinDir}}/${APP_NAME}", }, ExtraImages: []string{ "quay.io/operator-framework/helm-operator:${OPERATOR_SDK_VERSION} AS operator", @@ -628,7 +632,7 @@ func main() { }, Entrypoints: []string{"{{$.BinDir}}/{{.AppName}}", "run", "--watches-file=" + helmOperatorWatchFile}, }, - "vald-cli-loadtest": { + "vald-loadtest": { AppName: "loadtest", PackageDir: "tools/cli/loadtest", ExtraPackages: append(clangBuildDeps, "libhdf5-dev", "libaec-dev"), @@ -790,9 +794,9 @@ func main() { data.RootDir = "${HOME}" data.Environments["ROOTDIR"] = os.Args[1] } - if strings.Contains(data.BuildUser, "root") { - data.Environments["HOME"] = "/root" - data.Environments["USER"] = "root" + if strings.Contains(data.BuildUser, rootUser) { + data.Environments["HOME"] = "/" + rootUser + data.Environments["USER"] = rootUser } else { user := data.BuildUser if strings.Contains(user, ":") { diff --git a/hack/go.mod.default b/hack/go.mod.default index 5582e58f87..7185722665 100644 --- a/hack/go.mod.default +++ b/hack/go.mod.default @@ -1,6 +1,6 @@ module github.com/vdaas/vald -go 1.23.0 +go 1.23.1 replace ( cloud.google.com/go => 
cloud.google.com/go upgrade diff --git a/internal/backoff/backoff.go b/internal/backoff/backoff.go index 04c1355739..82fc9087bc 100644 --- a/internal/backoff/backoff.go +++ b/internal/backoff/backoff.go @@ -189,7 +189,21 @@ func (b *backoff) Do( } } } - return res, err + select { + case <-dctx.Done(): + switch dctx.Err() { + case context.DeadlineExceeded: + log.Debugf("[backoff]\tfor: "+name+",\tDeadline Exceeded\terror: %v", err.Error()) + return nil, errors.ErrBackoffTimeout(err) + case context.Canceled: + log.Debugf("[backoff]\tfor: "+name+",\tCanceled\terror: %v", err.Error()) + return nil, err + default: + return nil, errors.Join(dctx.Err(), err) + } + default: + return res, err + } } func (b *backoff) addJitter(dur float64) float64 { diff --git a/internal/backoff/backoff_test.go b/internal/backoff/backoff_test.go index 0549983bbb..1a57a004b2 100644 --- a/internal/backoff/backoff_test.go +++ b/internal/backoff/backoff_test.go @@ -288,7 +288,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return nil, false, err } @@ -317,7 +317,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -346,7 +346,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -380,7 +380,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -413,7 +413,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -442,7 +442,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -470,7 +470,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx, cancel := context.WithCancel(context.Background()) - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { cancel() return str, true, err @@ -499,7 +499,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx, cancel := context.WithCancel(context.Background()) - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -532,7 +532,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ diff --git a/internal/cache/gache/option_test.go b/internal/cache/gache/option_test.go index 1e35584599..625e1229aa 100644 --- a/internal/cache/gache/option_test.go +++ b/internal/cache/gache/option_test.go @@ -60,7 +60,7 @@ func TestDefaultOptions(t 
*testing.T) { tests := []test{ { - name: "set succuess", + name: "set success", want: want{ want: &cache[any]{ gache: gache.New[any](), @@ -122,7 +122,7 @@ func TestWithGache(t *testing.T) { func() test { ga := gache.New[any]() return test{ - name: "set succuess when g is not nil", + name: "set success when g is not nil", args: args{ g: ga, }, @@ -135,7 +135,7 @@ func TestWithGache(t *testing.T) { }(), func() test { return test{ - name: "set succuess when g is nil", + name: "set success when g is nil", want: want{ want: new(T), }, @@ -195,7 +195,7 @@ func TestWithExpiredHook(t *testing.T) { func() test { fn := func(context.Context, string) {} return test{ - name: "set succuess when f is not nil", + name: "set success when f is not nil", args: args{ f: fn, }, @@ -214,7 +214,7 @@ func TestWithExpiredHook(t *testing.T) { }(), func() test { return test{ - name: "set succuess when fn is nil", + name: "set success when fn is nil", want: want{ want: new(T), }, @@ -272,7 +272,7 @@ func TestWithExpireDuration(t *testing.T) { tests := []test{ { - name: "set succuess when dur is 0", + name: "set success when dur is 0", args: args{ dur: 0, }, @@ -281,7 +281,7 @@ func TestWithExpireDuration(t *testing.T) { }, }, { - name: "set succuess when dur is not 0", + name: "set success when dur is not 0", args: args{ dur: 10, }, @@ -343,7 +343,7 @@ func TestWithExpireCheckDuration(t *testing.T) { tests := []test{ { - name: "set succuess when dur is 0", + name: "set success when dur is 0", args: args{ dur: 0, }, @@ -352,7 +352,7 @@ func TestWithExpireCheckDuration(t *testing.T) { }, }, { - name: "set succuess when dur is not 0", + name: "set success when dur is not 0", args: args{ dur: 10, }, diff --git a/internal/cache/option.go b/internal/cache/option.go index ebedb19099..ef5a7583ae 100644 --- a/internal/cache/option.go +++ b/internal/cache/option.go @@ -55,7 +55,7 @@ func WithType[V any](mo string) Option[V] { } } -// WithExpireDuration returns Option after set expireDur when dur is cprrect param. +// WithExpireDuration returns Option after set expireDur when dur is correct param. func WithExpireDuration[V any](dur string) Option[V] { return func(c *cache[V]) { if len(dur) == 0 { @@ -69,7 +69,7 @@ func WithExpireDuration[V any](dur string) Option[V] { } } -// WithExpireCheckDuration returns Option after set expireCheckDur when dur is cprrect param. +// WithExpireCheckDuration returns Option after set expireCheckDur when dur is correct param. 
func WithExpireCheckDuration[V any](dur string) Option[V] { return func(c *cache[V]) { if len(dur) == 0 { diff --git a/internal/circuitbreaker/breaker.go b/internal/circuitbreaker/breaker.go index 29c499c000..81f3a455ef 100644 --- a/internal/circuitbreaker/breaker.go +++ b/internal/circuitbreaker/breaker.go @@ -35,7 +35,7 @@ type breaker struct { minSamples int64 openTimeout time.Duration openExp int64 // unix time - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 // unix time } @@ -172,7 +172,7 @@ func (b *breaker) currentState() State { func (b *breaker) reset() { atomic.StoreInt32(&b.tripped, 0) atomic.StoreInt64(&b.openExp, 0) - atomic.StoreInt64(&b.closedRefreshExp, time.Now().Add(b.cloedRefreshTimeout).UnixNano()) + atomic.StoreInt64(&b.closedRefreshExp, time.Now().Add(b.closedRefreshTimeout).UnixNano()) b.count.reset() } diff --git a/internal/circuitbreaker/breaker_test.go b/internal/circuitbreaker/breaker_test.go index 234e6c2cb6..71df0681bb 100644 --- a/internal/circuitbreaker/breaker_test.go +++ b/internal/circuitbreaker/breaker_test.go @@ -35,7 +35,7 @@ func Test_breaker_isReady(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct { @@ -162,7 +162,7 @@ func Test_breaker_isReady(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } @@ -186,7 +186,7 @@ func Test_breaker_success(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct{} @@ -281,7 +281,7 @@ func Test_breaker_success(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } if test.afterFunc != nil { @@ -308,7 +308,7 @@ func Test_breaker_fail(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct{} @@ -409,7 +409,7 @@ func Test_breaker_fail(t *testing.T) { t.Errorf("state changed: %d", b.tripped) } if total := b.count.Total(); total == 0 { - t.Errorf("count reseted: %d", total) + t.Errorf("count resetted: %d", total) } }, } @@ -439,7 +439,7 @@ func Test_breaker_fail(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } if test.afterFunc != nil { @@ -564,7 +564,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -613,7 +613,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, 
@@ -647,7 +647,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -689,7 +689,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -713,7 +713,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -749,7 +749,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -779,7 +779,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -821,7 +821,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -845,7 +845,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct{} @@ -876,7 +876,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -906,7 +906,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -948,7 +948,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -972,7 +972,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct{} @@ -1003,7 +1003,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1033,7 +1033,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1075,7 +1075,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, 
+// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -1099,7 +1099,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -1135,7 +1135,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1165,7 +1165,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1207,7 +1207,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // diff --git a/internal/circuitbreaker/options.go b/internal/circuitbreaker/options.go index f48337a18e..9b02e8abe0 100644 --- a/internal/circuitbreaker/options.go +++ b/internal/circuitbreaker/options.go @@ -131,7 +131,7 @@ func WithClosedRefreshTimeout(timeout string) BreakerOption { if err != nil { return errors.NewErrInvalidOption("closedRefreshTimeout", timeout, err) } - b.cloedRefreshTimeout = d + b.closedRefreshTimeout = d return nil } } diff --git a/internal/client/v1/client/vald/vald.go b/internal/client/v1/client/vald/vald.go index 8b0578ce8b..bb3a2c97ab 100644 --- a/internal/client/v1/client/vald/vald.go +++ b/internal/client/v1/client/vald/vald.go @@ -499,6 +499,28 @@ func (c *client) MultiUpdate( return res, nil } +func (c *client) UpdateTimestamp( + ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "internal/client/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + _, err = c.c.RoundRobin(ctx, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + res, err = vald.NewValdClient(conn).UpdateTimestamp(ctx, in, append(copts, opts...)...) + return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + func (c *client) Upsert( ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption, ) (res *payload.Object_Location, err error) { @@ -1088,6 +1110,18 @@ func (c *singleClient) Update( return c.vc.Update(ctx, in, opts...) } +func (c *singleClient) UpdateTimestamp( + ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "internal/singleClient/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + return c.vc.UpdateTimestamp(ctx, in, opts...) 
+} + func (c *singleClient) StreamUpdate( ctx context.Context, opts ...grpc.CallOption, ) (res vald.Update_StreamUpdateClient, err error) { diff --git a/internal/client/v1/client/vald/vald_test.go b/internal/client/v1/client/vald/vald_test.go index 10a006d2bf..e3320d9b09 100644 --- a/internal/client/v1/client/vald/vald_test.go +++ b/internal/client/v1/client/vald/vald_test.go @@ -2603,6 +2603,118 @@ package vald // } // } // +// func Test_client_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Update_TimestampRequest +// opts []grpc.CallOption +// } +// type fields struct { +// addrs []string +// c grpc.Client +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &client{ +// addrs: test.fields.addrs, +// c: test.fields.c, +// } +// +// gotRes, err := c.UpdateTimestamp(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_client_Upsert(t *testing.T) { // type args struct { // ctx context.Context @@ -4274,6 +4386,118 @@ package vald // } // } // +// func Test_client_IndexProperty(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Empty +// opts []grpc.CallOption +// } +// type fields struct { +// addrs []string +// c grpc.Client +// } +// type want struct { +// wantRes *payload.Info_Index_PropertyDetail +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_PropertyDetail, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Info_Index_PropertyDetail, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &client{ +// addrs: test.fields.addrs, +// c: test.fields.c, +// } +// +// gotRes, err := c.IndexProperty(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_client_GetTimestamp(t *testing.T) { // type args struct { // ctx context.Context @@ -6495,6 +6719,114 @@ package vald // } // } // +// func Test_singleClient_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Update_TimestampRequest +// opts []grpc.CallOption +// } +// type fields struct { +// vc vald.Client +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &singleClient{ +// vc: test.fields.vc, +// } +// +// gotRes, err := c.UpdateTimestamp(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_singleClient_StreamUpdate(t *testing.T) { // type args struct { // ctx context.Context @@ -8319,6 +8651,114 @@ package vald // } // } // +// func Test_singleClient_IndexProperty(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Empty +// opts []grpc.CallOption +// } +// type fields struct { +// vc vald.Client +// } +// type want struct { +// wantRes *payload.Info_Index_PropertyDetail +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_PropertyDetail, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Info_Index_PropertyDetail, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &singleClient{ +// vc: test.fields.vc, +// } +// +// gotRes, err := c.IndexProperty(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_singleClient_GetTimestamp(t *testing.T) { // type args struct { // ctx context.Context diff --git a/internal/compress/gob_test.go b/internal/compress/gob_test.go index 68545c880f..ef38462c70 100644 --- a/internal/compress/gob_test.go +++ b/internal/compress/gob_test.go @@ -391,7 +391,7 @@ func Test_gobCompressor_Reader(t *testing.T) { src io.ReadCloser } type fields struct { - transcodr gob.Transcoder + transcoder gob.Transcoder } type want struct { want io.ReadCloser @@ -425,7 +425,7 @@ func Test_gobCompressor_Reader(t *testing.T) { src: rc, }, fields: fields{ - transcodr: &gob.MockTranscoder{ + transcoder: &gob.MockTranscoder{ NewDecoderFunc: func(r io.Reader) gob.Decoder { return dec }, @@ -457,7 +457,7 @@ func Test_gobCompressor_Reader(t *testing.T) { checkFunc = defaultCheckFunc } g := &gobCompressor{ - transcoder: test.fields.transcodr, + transcoder: test.fields.transcoder, } got, err := g.Reader(test.args.src) diff --git a/internal/compress/lz4_test.go b/internal/compress/lz4_test.go index a54240d66a..989cf1c240 100644 --- a/internal/compress/lz4_test.go +++ b/internal/compress/lz4_test.go @@ -476,14 +476,14 @@ func Test_lz4Compressor_DecompressVector(t *testing.T) { }, }, { - name: "returns (nil, error) when decompresse fails", + name: "returns (nil, error) when decompress fails", args: args{ bs: []byte("vdaas/vald"), }, fields: fields{ gobc: &MockCompressor{ DecompressVectorFunc: func(bytes []byte) (vector []float32, err error) { - return nil, errors.New("decompresse err") + return nil, errors.New("decompress err") }, }, compressionLevel: 0, @@ -497,7 +497,7 @@ func Test_lz4Compressor_DecompressVector(t *testing.T) { }, want: want{ want: nil, - err: errors.New("decompresse err"), + err: errors.New("decompress err"), }, }, } @@ -653,7 +653,7 @@ func Test_lz4Compressor_Writer(t *testing.T) { w = new(lz4.MockWriter) ) return test{ - name: "returns (io.WriteCloser, nil) when no erro occurs", + name: "returns (io.WriteCloser, nil) when no error occurs", args: args{ dst: dst, }, diff --git a/internal/config/cassandra_test.go b/internal/config/cassandra_test.go index cfe1967498..8ffad55132 100644 --- a/internal/config/cassandra_test.go +++ b/internal/config/cassandra_test.go @@ -283,7 +283,7 @@ func TestCassandra_Bind(t *testing.T) { key := "CASSANDRA_BIND_PASSWORD" val := "cassandra_password" return test{ - name: "return Cassandra struct when Password is set via the envirionment value", + name: "return Cassandra struct when Password is set via the environment value", fields: fields{ Password: "_" + key + "_", }, diff --git a/internal/config/faiss_test.go b/internal/config/faiss_test.go index fc69110c00..f3230735fc 100644 --- a/internal/config/faiss_test.go +++ b/internal/config/faiss_test.go @@ -22,6 +22,7 @@ package config // Nlist int // M int // NbitsPerIdx int +// MethodType string // MetricType string // EnableInMemoryMode bool // AutoIndexCheckDuration string @@ -65,6 +66,7 @@ package config // Nlist:0, // M:0, // NbitsPerIdx:0, +// MethodType:"", // MetricType:"", // EnableInMemoryMode:false, // AutoIndexCheckDuration:"", @@ -102,6 +104,7 @@ package config // Nlist:0, // M:0, // NbitsPerIdx:0, +// MethodType:"", // MetricType:"", // EnableInMemoryMode:false, // AutoIndexCheckDuration:"", @@ -151,6 +154,7 @@ package config // Nlist: test.fields.Nlist, // M: test.fields.M, // NbitsPerIdx: test.fields.NbitsPerIdx, +// MethodType: 
test.fields.MethodType, // MetricType: test.fields.MetricType, // EnableInMemoryMode: test.fields.EnableInMemoryMode, // AutoIndexCheckDuration: test.fields.AutoIndexCheckDuration, diff --git a/internal/config/log.go b/internal/config/log.go index 4f8ca0c602..908e89c59c 100644 --- a/internal/config/log.go +++ b/internal/config/log.go @@ -24,7 +24,7 @@ type Logging struct { Format string `json:"format" yaml:"format"` } -// Bind returns Logging object whose every value is field value or envirionment value. +// Bind returns Logging object whose every value is field value or environment value. func (l *Logging) Bind() *Logging { l.Logger = GetActualValue(l.Logger) l.Level = GetActualValue(l.Level) diff --git a/internal/core/algorithm/ngt/ngt_test.go b/internal/core/algorithm/ngt/ngt_test.go index 82cffb9464..801ff7f88c 100644 --- a/internal/core/algorithm/ngt/ngt_test.go +++ b/internal/core/algorithm/ngt/ngt_test.go @@ -103,7 +103,7 @@ func TestNew(t *testing.T) { beforeFunc func(args) afterFunc func(*testing.T, NGT) error } - defaultComprators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { + defaultComparators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { return s1 == s2 }))) defaultCheckFunc := func(w want, got NGT, err error, comparators ...comparator.Option) error { @@ -234,7 +234,7 @@ func TestNew(t *testing.T) { } comparators := test.comparators if test.comparators == nil || len(test.comparators) == 0 { - comparators = defaultComprators + comparators = defaultComparators } got, err := New(test.args.opts...) @@ -692,7 +692,7 @@ func Test_gen(t *testing.T) { beforeFunc func(*testing.T, args) afterFunc func(*testing.T, NGT) error } - defaultComprators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { + defaultComparators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { return s1 == s2 }))) defaultCheckFunc := func(_ context.Context, w want, got NGT, err error, comparators ...comparator.Option) error { @@ -839,7 +839,7 @@ func Test_gen(t *testing.T) { } comparators := test.comparators if test.comparators == nil || len(test.comparators) == 0 { - comparators = defaultComprators + comparators = defaultComparators } got, err := gen(test.args.isLoad, test.args.opts...) @@ -1049,7 +1049,7 @@ func Test_ngt_loadOptions(t *testing.T) { }, }, { - name: "load option failed with Ignoreable error", + name: "load option failed with Ignorable error", args: args{ opts: []Option{ func(n *ngt) error { @@ -1107,7 +1107,7 @@ func Test_ngt_loadOptions(t *testing.T) { func Test_ngt_create(t *testing.T) { // This test is skipped because it requires ngt.prop to be set probably. // We cannot initialize ngt.prop since it is C dependencies. - // This function is called by New(), and the ngt.prop is destoried in New(), so we cannot test this function individually. + // This function is called by New(), and the ngt.prop is destroyed in New(), so we cannot test this function individually. 
t.SkipNow() } @@ -1476,7 +1476,7 @@ func Test_ngt_Search(t *testing.T) { }, }, { - name: "resturn vector id after the nearby vector inserted (uint8)", + name: "return vector id after the nearby vector inserted (uint8)", args: args{ ctx: context.Background(), vec: []float32{1, 2, 3, 4, 5, 6, 7, 8, 9}, @@ -1653,7 +1653,7 @@ func Test_ngt_Search(t *testing.T) { }, }, { - name: "resturn vector id after the nearby vector inserted (float)", + name: "return vector id after the nearby vector inserted (float)", args: args{ ctx: context.Background(), vec: []float32{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.91}, diff --git a/internal/db/rdb/mysql/dbr/dbr.go b/internal/db/rdb/mysql/dbr/dbr.go index e123403154..00702c9d6b 100644 --- a/internal/db/rdb/mysql/dbr/dbr.go +++ b/internal/db/rdb/mysql/dbr/dbr.go @@ -18,7 +18,7 @@ package dbr import dbr "github.com/gocraft/dbr/v2" -// DBR repreesnts the interface to create connection to MySQL. +// DBR represents the interface to create connection to MySQL. type DBR interface { Open(driver, dsn string, log EventReceiver) (Connection, error) Eq(col string, val any) Builder diff --git a/internal/db/rdb/mysql/dbr/insert.go b/internal/db/rdb/mysql/dbr/insert.go index 8d96e916c8..d15a9865fa 100644 --- a/internal/db/rdb/mysql/dbr/insert.go +++ b/internal/db/rdb/mysql/dbr/insert.go @@ -34,13 +34,13 @@ type insertStmt struct { *dbr.InsertStmt } -// Columns set colums to the insertStmt. +// Columns sets the columns of the insertStmt. func (stmt *insertStmt) Columns(column ...string) InsertStmt { stmt.InsertStmt = stmt.InsertStmt.Columns(column...) return stmt } -// ExecContext execure inserting to the database. +// ExecContext executes the insert against the database. func (stmt *insertStmt) ExecContext(ctx context.Context) (sql.Result, error) { return stmt.InsertStmt.ExecContext(ctx) } diff --git a/internal/db/rdb/mysql/dbr/session.go b/internal/db/rdb/mysql/dbr/session.go index c1d8f60918..f6700947c1 100644 --- a/internal/db/rdb/mysql/dbr/session.go +++ b/internal/db/rdb/mysql/dbr/session.go @@ -39,7 +39,7 @@ func NewSession(conn Connection, event EventReceiver) Session { return conn.NewSession(event) } -// SeleSelect creates and returns the SelectStmt. +// Select creates and returns the SelectStmt. func (sess *session) Select(column ...string) SelectStmt { return &selectStmt{ sess.Session.Select(column...), @@ -56,7 +56,7 @@ func (sess *session) Begin() (Tx, error) { // Close closes the database and prevents new queries from starting. // Close then waits for all queries that have started processing on the server to finish. -// Close returns the errro if something goes worng during close. +// Close returns the error if something goes wrong during close. func (sess *session) Close() error { return sess.Session.Close() } diff --git a/internal/db/rdb/mysql/dbr/tx.go b/internal/db/rdb/mysql/dbr/tx.go index ce5dc2d02d..3713766f34 100644 --- a/internal/db/rdb/mysql/dbr/tx.go +++ b/internal/db/rdb/mysql/dbr/tx.go @@ -43,7 +43,7 @@ func (t *tx) Rollback() error { return t.Tx.Rollback() } -// RollbackUnlessCommitted rollsback the transaction unless it has already been committed or rolled back. +// RollbackUnlessCommitted rolls back the transaction unless it has already been committed or rolled back.
func (t *tx) RollbackUnlessCommitted() { t.Tx.RollbackUnlessCommitted() } diff --git a/internal/db/rdb/mysql/mysql_test.go b/internal/db/rdb/mysql/mysql_test.go index c741df72cd..4523b818af 100644 --- a/internal/db/rdb/mysql/mysql_test.go +++ b/internal/db/rdb/mysql/mysql_test.go @@ -1818,7 +1818,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -1916,7 +1916,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2013,7 +2013,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2512,7 +2512,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2612,7 +2612,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2711,7 +2711,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -3022,7 +3022,7 @@ func Test_mySQLClient_DeleteVector(t *testing.T) { func() test { err := errors.ErrMySQLTransactionNotCreated return test{ - name: "return error when transacton is nil", + name: "return error when transaction is nil", args: args{ ctx: context.Background(), uuid: "vald-01", @@ -3217,9 +3217,9 @@ func Test_mySQLClient_DeleteVector(t *testing.T) { } }(), func() test { - err := errors.New("podIPTableNmae error") + err := errors.New("podIPTableName error") return test{ - name: "return error when DeleteFromFunc(podIPTableNmae) returns error", + name: "return error when DeleteFromFunc(podIPTableName) returns error", args: args{ ctx: context.Background(), uuid: "vald-01", @@ -3813,7 +3813,7 @@ func Test_mySQLClient_SetIPs(t *testing.T) { RollbackUnlessCommittedFunc: func() {}, InsertIntoFunc: func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -3885,7 +3885,7 @@ func Test_mySQLClient_SetIPs(t *testing.T) { RollbackUnlessCommittedFunc: func() {}, 
InsertIntoFunc: func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { diff --git a/internal/db/rdb/mysql/option.go b/internal/db/rdb/mysql/option.go index 7eac41b6d5..4a4c8b3194 100644 --- a/internal/db/rdb/mysql/option.go +++ b/internal/db/rdb/mysql/option.go @@ -182,7 +182,7 @@ func WithConnectionLifeTimeLimit(dur string) Option { } // WithMaxIdleConns returns the option to set the maxIdleConns. -// If conns is negative numner, no idle connections are retained. +// If conns is a negative number, no idle connections are retained. // ref: https://golang.org/src/database/sql/sql.go?s=24983:25019#L879 func WithMaxIdleConns(conns int) Option { return func(m *mySQLClient) error { @@ -194,7 +194,7 @@ func WithMaxIdleConns(conns int) Option { } // WithMaxOpenConns returns the option to set the maxOpenConns. -// If conns is negative numner, no limit on the number of open connections. +// If conns is a negative number, there is no limit on the number of open connections. // ref: https://golang.org/src/database/sql/sql.go?s=24983:25019#L923 func WithMaxOpenConns(conns int) Option { return func(m *mySQLClient) error { diff --git a/internal/db/storage/blob/cloudstorage/option.go b/internal/db/storage/blob/cloudstorage/option.go index 28ccb2a056..89561867e7 100644 --- a/internal/db/storage/blob/cloudstorage/option.go +++ b/internal/db/storage/blob/cloudstorage/option.go @@ -39,7 +39,7 @@ func WithURL(str string) Option { } } -// WithURLOpener returns Option that sets c.urlOpner. +// WithURLOpener returns Option that sets c.urlOpener. func WithURLOpener(uo *gcsblob.URLOpener) Option { return func(c *client) error { if uo != nil { diff --git a/internal/db/storage/blob/s3/reader/option.go b/internal/db/storage/blob/s3/reader/option.go index d71350d227..8a71e59fa2 100644 --- a/internal/db/storage/blob/s3/reader/option.go +++ b/internal/db/storage/blob/s3/reader/option.go @@ -60,7 +60,7 @@ func WithBucket(bucket string) Option { } } -// WithMaxChunkSize retunrs the option to set the maxChunkSize. +// WithMaxChunkSize returns the option to set the maxChunkSize.
func WithMaxChunkSize(size int64) Option { return func(r *reader) { r.maxChunkSize = size diff --git a/internal/db/storage/blob/s3/s3_test.go b/internal/db/storage/blob/s3/s3_test.go index 3b4532c69b..04e6023472 100644 --- a/internal/db/storage/blob/s3/s3_test.go +++ b/internal/db/storage/blob/s3/s3_test.go @@ -389,7 +389,7 @@ func Test_client_Close(t *testing.T) { } tests := []test{ { - name: "retursn nil", + name: "returns nil", want: want{ err: nil, }, diff --git a/internal/db/storage/blob/s3/session/session_test.go b/internal/db/storage/blob/s3/session/session_test.go index ba4eb8c8f8..50389e4f04 100644 --- a/internal/db/storage/blob/s3/session/session_test.go +++ b/internal/db/storage/blob/s3/session/session_test.go @@ -503,7 +503,7 @@ func Test_sess_Session(t *testing.T) { }, }, { - name: "set EnableParamValdiation success", + name: "set EnableParamValidation success", fields: fields{ enableParamValidation: true, }, @@ -532,7 +532,7 @@ func Test_sess_Session(t *testing.T) { }, }, { - name: "set Enable100Conitnue success", + name: "set Enable100Continue success", fields: fields{ enable100Continue: true, }, diff --git a/internal/errors/agent.go b/internal/errors/agent.go index da16986ade..560e1244d2 100644 --- a/internal/errors/agent.go +++ b/internal/errors/agent.go @@ -115,4 +115,50 @@ var ( // ErrWriteOperationToReadReplica represents an error that when a write operation is made to read replica. ErrWriteOperationToReadReplica = New("write operation to read replica is not possible") + + // ErrInvalidTimestamp represents a function to generate an error that the timestamp is invalid. + ErrInvalidTimestamp = func(ts int64) error { + return Errorf("invalid timestamp detected: %d", ts) + } + + // ErrFlushingIsInProgress represents an error that flushing is in progress but a request has been received. + ErrFlushingIsInProgress = New("flush is in progress") + + // ErrUUIDAlreadyExists represents a function to generate an error that the uuid already exists. + ErrUUIDAlreadyExists = func(uuid string) error { + return Errorf("uuid %s index already exists", uuid) + } + + // ErrUUIDNotFound represents a function to generate an error that the uuid is not found. + ErrUUIDNotFound = func(id uint32) error { + if id == 0 { + return New("object uuid not found") + } + return Errorf("object uuid %d's metadata not found", id) + } + + // ErrObjectIDNotFound represents a function to generate an error that the object id is not found. + ErrObjectIDNotFound = func(uuid string) error { + return Errorf("uuid %s's object id not found", uuid) + } + + // ErrRemoveRequestedBeforeIndexing represents a function to generate an error that the object is not yet indexed, so it cannot be removed. + ErrRemoveRequestedBeforeIndexing = func(oid uint) error { + return Errorf("object id %d is not indexed we cannot remove it", oid) + } + + ErrSearchResultEmptyButNoDataStored = New("empty search result from cgo but no index data stored in agent, this error can be ignored.") + + // ErrZeroTimestamp represents an error that the timestamp is zero.
+ ErrZeroTimestamp = New("zero timestamp for index detected") + + // ErrNewerTimestampObjectAlreadyExists represents a function to generate an error that the object is already newer than request + ErrNewerTimestampObjectAlreadyExists = func(uuid string, ts int64) error { + return Errorf("uuid %s's object is already newer than requested timestamp %d", uuid, ts) + } + + // ErrNothingToBeDoneForUpdate represents a function to generate an error that there is no object to update + ErrNothingToBeDoneForUpdate = func(uuid string) error { + return Errorf("nothing to be done for update uuid %s's object", uuid) + } ) diff --git a/internal/errors/agent_test.go b/internal/errors/agent_test.go index 360045f847..dd6b579a54 100644 --- a/internal/errors/agent_test.go +++ b/internal/errors/agent_test.go @@ -17,7 +17,10 @@ // Package errors package errors -import "testing" +import ( + "math" + "testing" +) func TestErrObjectNotFound(t *testing.T) { type args struct { @@ -106,4 +109,1404 @@ func TestErrObjectNotFound(t *testing.T) { } } +func TestErrCreateProperty(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrCreateProperty error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to create property: ngt error"), + }, + }, + { + name: "return an ErrCreateProperty error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to create property"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrCreateProperty(test.args.err) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrIndexNotFound(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrIndexNotFound error", + want: want{ + want: New("index not found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrIndexNotFound + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrIndexLoadTimeout(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: 
\"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrIndexLoadTimeout error", + want: want{ + want: New("index load timeout"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrIndexLoadTimeout + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrInvalidDimensionSize(t *testing.T) { + type args struct { + current int + limit int + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 5", + args: args{ + current: 10, + limit: 5, + }, + want: want{ + want: New("dimension size 10 is invalid, the supporting dimension size must be between 2 ~ 5"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 5", + args: args{ + current: 0, + limit: 5, + }, + want: want{ + want: New("dimension size 0 is invalid, the supporting dimension size must be between 2 ~ 5"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 0", + args: args{ + current: 10, + limit: 0, + }, + want: want{ + want: New("dimension size 10 is invalid, the supporting dimension size must be bigger than 2"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 0", + args: args{ + current: 0, + limit: 0, + }, + want: want{ + want: New("dimension size 0 is invalid, the supporting dimension size must be bigger than 2"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", + args: args{ + current: int(math.MinInt64), + limit: int(math.MinInt64), + }, + want: want{ + want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MinInt64), int(math.MinInt64)), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", + args: args{ + current: int(math.MaxInt64), + limit: int(math.MaxInt64), + }, + want: want{ + want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MaxInt64), int(math.MaxInt64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrInvalidDimensionSize(test.args.current, test.args.limit) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrInvalidUUID(t *testing.T) { + type args struct { + uuid string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + 
afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrInvalidUUID error when uuid is empty string", + args: args{ + uuid: "", + }, + want: want{ + want: New("uuid \"\" is invalid"), + }, + }, + { + name: "return an ErrInvalidUUID error when uuid is foo", + args: args{ + uuid: "foo", + }, + want: want{ + want: New("uuid \"foo\" is invalid"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrInvalidUUID(test.args.uuid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrDimensionLimitExceed(t *testing.T) { + type args struct { + current int + limit int + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrDimensionLimitExceed error when current is 10 and limit is 5", + args: args{ + current: 10, + limit: 5, + }, + want: want{ + want: New("supported dimension limit exceed:\trequired = 10,\tlimit = 5"), + }, + }, + + { + name: "return an ErrDimensionLimitExceed error when current is 0 and limit is 0", + args: args{ + current: 0, + limit: 0, + }, + want: want{ + want: New("supported dimension limit exceed:\trequired = 0,\tlimit = 0"), + }, + }, + { + name: "return an ErrDimensionLimitExceed error when current and limit are the minimum value of int", + args: args{ + current: int(math.MinInt64), + limit: int(math.MinInt64), + }, + want: want{ + want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MinInt64), int(math.MinInt64)), + }, + }, + { + name: "return an ErrDimensionLimitExceed error when current and limit are the maximum value of int", + args: args{ + current: int(math.MaxInt64), + limit: int(math.MaxInt64), + }, + want: want{ + want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MaxInt64), int(math.MaxInt64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrDimensionLimitExceed(test.args.current, test.args.limit) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrIncompatibleDimensionSize(t *testing.T) { + type args struct { + req int + dim int + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ 
+ { + name: "return an ErrIncompatibleDimensionSize error when req is 640 and dim is 720", + args: args{ + req: 640, + dim: 720, + }, + want: want{ + want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 720"), + }, + }, + { + name: "return an ErrIncompatibleDimensionSize error when req is empty and dim is 720", + args: args{ + dim: 720, + }, + want: want{ + want: New("incompatible dimension size detected\trequested: 0,\tconfigured: 720"), + }, + }, + { + name: "return an ErrIncompatibleDimensionSize error when req is 640", + args: args{ + req: 640, + }, + want: want{ + want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 0"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrIncompatibleDimensionSize(test.args.req, test.args.dim) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUnsupportedObjectType(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUnsupportedObjectType error", + want: want{ + want: New("unsupported ObjectType"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUnsupportedObjectType + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUnsupportedDistanceType(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUnsupportedDistanceType error", + want: want{ + want: New("unsupported DistanceType"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUnsupportedDistanceType + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetDistanceType(t *testing.T) { + type args struct { + err error + distance string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + 
tests := []test{ + { + name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is l2", + args: args{ + err: New("ngt error"), + distance: "l2", + }, + want: want{ + want: New("failed to set distance type l2: ngt error"), + }, + }, + { + name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is empty", + args: args{ + err: New("ngt error"), + distance: "", + }, + want: want{ + want: New("failed to set distance type : ngt error"), + }, + }, + { + name: "return an ErrFailedToSetDistanceType error when err is nil and distance is cos", + args: args{ + err: nil, + distance: "cos", + }, + want: want{ + want: New("failed to set distance type cos"), + }, + }, + { + name: "return an ErrFailedToSetDistanceType error when err is nil and distance is empty", + args: args{ + err: nil, + distance: "", + }, + want: want{ + want: New("failed to set distance type "), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetDistanceType(test.args.err, test.args.distance) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetObjectType(t *testing.T) { + type args struct { + err error + t string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is Float", + args: args{ + err: New("ngt error"), + t: "Float", + }, + want: want{ + want: New("failed to set object type Float: ngt error"), + }, + }, + { + name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is empty", + args: args{ + err: New("ngt error"), + t: "", + }, + want: want{ + want: New("failed to set object type : ngt error"), + }, + }, + { + name: "return an ErrFailedToSetObjectType error when err is nil and t is Int", + args: args{ + err: nil, + t: "Int", + }, + want: want{ + want: New("failed to set object type Int"), + }, + }, + { + name: "return an ErrFailedToSetObjectType error when err is nil and t is empty", + args: args{ + err: nil, + t: "", + }, + want: want{ + want: New("failed to set object type "), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetObjectType(test.args.err, test.args.t) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetDimension(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got 
error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetDimension error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to set dimension: ngt error"), + }, + }, + { + name: "return an ErrFailedToSetDimension error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to set dimension"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetDimension(test.args.err) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetCreationEdgeSize(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetCreationEdgeSize error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to set creation edge size: ngt error"), + }, + }, + { + name: "return an ErrFailedToSetCreationEdgeSize error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to set creation edge size"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetCreationEdgeSize(test.args.err) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetSearchEdgeSize(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetSearchEdgeSize error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to set search edge size: ngt error"), + }, + }, + { + name: "return an ErrFailedToSetSearchEdgeSize error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to set search edge size"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetSearchEdgeSize(test.args.err) + if err := 
checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUncommittedIndexExists(t *testing.T) { + type args struct { + num uint64 + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUncommittedIndexExists error when num is 100", + args: args{ + num: 100, + }, + want: want{ + want: New("100 indexes are not committed"), + }, + }, + + { + name: "return an ErrUncommittedIndexExists error when num is 0", + args: args{ + num: 0, + }, + want: want{ + want: New("0 indexes are not committed"), + }, + }, + { + name: "return an ErrUncommittedIndexExists error when num is the maximum value of uint64", + args: args{ + num: math.MaxUint64, + }, + want: want{ + want: Errorf("%d indexes are not committed", uint(math.MaxUint64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUncommittedIndexExists(test.args.num) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUncommittedIndexNotFound(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUncommittedIndexNotFound error", + want: want{ + want: New("uncommitted indexes are not found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUncommittedIndexNotFound + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrCAPINotImplemented(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrCAPINotImplemented error", + want: want{ + want: New("not implemented in C API"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrCAPINotImplemented + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUUIDAlreadyExists(t 
*testing.T) { + type args struct { + uuid string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUUIDAlreadyExists error when uuid is 550e8400-e29b-41d4", + args: args{ + uuid: "550e8400-e29b-41d4", + }, + want: want{ + want: New("uuid 550e8400-e29b-41d4 index already exists"), + }, + }, + { + name: "return an ErrUUIDAlreadyExists error when uuid is empty", + args: args{ + uuid: "", + }, + want: want{ + want: New("uuid index already exists"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUUIDAlreadyExists(test.args.uuid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUUIDNotFound(t *testing.T) { + type args struct { + id uint32 + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUUIDNotFound error when id is 1234", + args: args{ + id: 1234, + }, + want: want{ + want: New("object uuid 1234's metadata not found"), + }, + }, + { + name: "return an ErrUUIDNotFound error when id is the maximum value of uint32", + args: args{ + id: math.MaxUint32, + }, + want: want{ + want: Errorf("object uuid %d's metadata not found", math.MaxUint32), + }, + }, + { + name: "return an ErrUUIDNotFound error when id is 0", + args: args{ + id: 0, + }, + want: want{ + want: New("object uuid not found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUUIDNotFound(test.args.id) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrObjectIDNotFound(t *testing.T) { + type args struct { + uuid string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrObjectIDNotFound error when uuid is 550e8400-e29b-41d4.", + args: args{ + uuid: "550e8400-e29b-41d4", + }, + want: want{ + want: New("uuid 550e8400-e29b-41d4's object id not found"), + }, + }, + { + name: "return an ErrObjectIDNotFound error when uuid is empty.", + args: args{ + uuid: "", + }, + want: want{ + want: New("uuid 's object id not 
found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrObjectIDNotFound(test.args.uuid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrRemoveRequestedBeforeIndexing(t *testing.T) { + type args struct { + oid uint + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 100", + args: args{ + oid: 100, + }, + want: want{ + want: New("object id 100 is not indexed we cannot remove it"), + }, + }, + { + name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 0", + args: args{ + oid: 0, + }, + want: want{ + want: New("object id 0 is not indexed we cannot remove it"), + }, + }, + { + name: "return an ErrRemoveRequestedBeforeIndexing error when oid is maximum value of uint", + args: args{ + oid: uint(math.MaxUint64), + }, + want: want{ + want: Errorf("object id %d is not indexed we cannot remove it", uint(math.MaxUint64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrRemoveRequestedBeforeIndexing(test.args.oid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + // NOT IMPLEMENTED BELOW diff --git a/internal/errors/corrector.go b/internal/errors/corrector.go index 5fbc08f44b..d1328b0a78 100644 --- a/internal/errors/corrector.go +++ b/internal/errors/corrector.go @@ -32,12 +32,9 @@ var ErrNoAvailableAgentToRemove = New("no available agent to remove replica") // ErrFailedToCorrectReplicaNum represents an error that failed to correct replica number after correction process. var ErrFailedToCorrectReplicaNum = New("failed to correct replica number after correction process") -// ErrFailedToReceiveVectorFromStream represents an error that failed to receive vector from stream while index correction process. -var ErrFailedToReceiveVectorFromStream = New("failed to receive vector from stream") - // ErrFailedToCheckConsistency represents an error that failed to check consistency process while index correction process. var ErrFailedToCheckConsistency = func(err error) error { - return Wrap(err, "failed to check consistency while index correctioin process") + return Wrap(err, "failed to check consistency while index correction process") } // ErrStreamListObjectStreamFinishedUnexpectedly represents an error that StreamListObject finished not because of io.EOF. diff --git a/internal/errors/grpc.go b/internal/errors/grpc.go index 4df3243912..9a39aae5db 100644 --- a/internal/errors/grpc.go +++ b/internal/errors/grpc.go @@ -63,7 +63,7 @@ var ( // ErrGRPCUnexpectedStatusError represents an error that the gRPC status code is undefined. 
ErrGRPCUnexpectedStatusError = func(code string, err error) error { - return Wrapf(err, "unexcepted error detected: code %s", code) + return Wrapf(err, "unexpected error detected: code %s", code) } // ErrInvalidProtoMessageType represents an error that the gRPC protocol buffers message type is invalid. diff --git a/internal/errors/net.go b/internal/errors/net.go index d21de4845a..1a64fa9cf6 100644 --- a/internal/errors/net.go +++ b/internal/errors/net.go @@ -37,7 +37,7 @@ var ( return Errorf("no port available for Host: %s\tbetween %d ~ %d", host, start, end) } - // ErrLookupIPAddrNotFound represents a function to generate an error that the host's ip address could not discovererd from DNS. + // ErrLookupIPAddrNotFound represents a function to generate an error that the host's ip address could not be discovered from DNS. ErrLookupIPAddrNotFound = func(host string) error { return Errorf("failed to lookup ip addrs for host: %s", host) } diff --git a/internal/errors/ngt.go b/internal/errors/ngt.go index f055892047..8ecc199899 100644 --- a/internal/errors/ngt.go +++ b/internal/errors/ngt.go @@ -18,35 +18,6 @@ package errors var ( - - // ErrFlushingIsInProgress represents an error that the flushing is in progress, but any request has been received. - ErrFlushingIsInProgress = New("flush is in progress") - - // ErrUUIDAlreadyExists represents a function to generate an error that the uuid already exists. - ErrUUIDAlreadyExists = func(uuid string) error { - return Errorf("ngt uuid %s index already exists", uuid) - } - - // ErrUUIDNotFound represents a function to generate an error that the uuid is not found. - ErrUUIDNotFound = func(id uint32) error { - if id == 0 { - return New("ngt object uuid not found") - } - return Errorf("ngt object uuid %d's metadata not found", id) - } - - // ErrObjectIDNotFound represents a function to generate an error that the object id is not found. - ErrObjectIDNotFound = func(uuid string) error { - return Errorf("ngt uuid %s's object id not found", uuid) - } - - // ErrRemoveRequestedBeforeIndexing represents a function to generate an error that the object is not indexed so can not remove it.
- ErrRemoveRequestedBeforeIndexing = func(oid uint) error { - return Errorf("object id %d is not indexed we cannot remove it", oid) - } - - ErrSearchResultEmptyButNoDataStored = New("empty search result from cgo but no index data stored in ngt, this error can be ignored.") - ErrNGTIndexStatisticsDisabled = New("ngt get statistics is disabled") ErrNGTIndexStatisticsNotReady = New("ngt get statistics is not ready") diff --git a/internal/errors/ngt_test.go b/internal/errors/ngt_test.go index 232ab1c46a..7121ee3e42 100644 --- a/internal/errors/ngt_test.go +++ b/internal/errors/ngt_test.go @@ -17,1411 +17,6 @@ // Package errors package errors -import ( - "math" - "testing" -) - -func TestErrCreateProperty(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrCreateProperty error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to create property: ngt error"), - }, - }, - { - name: "return an ErrCreateProperty error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to create property"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrCreateProperty(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrIndexNotFound(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrIndexNotFound error", - want: want{ - want: New("index not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrIndexNotFound - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrIndexLoadTimeout(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrIndexLoadTimeout error", - want: want{ - want: New("index load timeout"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { 
- defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrIndexLoadTimeout - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrInvalidDimensionSize(t *testing.T) { - type args struct { - current int - limit int - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 5", - args: args{ - current: 10, - limit: 5, - }, - want: want{ - want: New("dimension size 10 is invalid, the supporting dimension size must be between 2 ~ 5"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 5", - args: args{ - current: 0, - limit: 5, - }, - want: want{ - want: New("dimension size 0 is invalid, the supporting dimension size must be between 2 ~ 5"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 0", - args: args{ - current: 10, - limit: 0, - }, - want: want{ - want: New("dimension size 10 is invalid, the supporting dimension size must be bigger than 2"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 0", - args: args{ - current: 0, - limit: 0, - }, - want: want{ - want: New("dimension size 0 is invalid, the supporting dimension size must be bigger than 2"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", - args: args{ - current: int(math.MinInt64), - limit: int(math.MinInt64), - }, - want: want{ - want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MinInt64), int(math.MinInt64)), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", - args: args{ - current: int(math.MaxInt64), - limit: int(math.MaxInt64), - }, - want: want{ - want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MaxInt64), int(math.MaxInt64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrInvalidDimensionSize(test.args.current, test.args.limit) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrInvalidUUID(t *testing.T) { - type args struct { - uuid string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrInvalidUUID error when uuid is empty string", - args: args{ - uuid: "", - }, - want: want{ - want: New("uuid \"\" is invalid"), - }, - }, - 
{ - name: "return an ErrInvalidUUID error when uuid is foo", - args: args{ - uuid: "foo", - }, - want: want{ - want: New("uuid \"foo\" is invalid"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrInvalidUUID(test.args.uuid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrDimensionLimitExceed(t *testing.T) { - type args struct { - current int - limit int - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrDimensionLimitExceed error when current is 10 and limit is 5", - args: args{ - current: 10, - limit: 5, - }, - want: want{ - want: New("supported dimension limit exceed:\trequired = 10,\tlimit = 5"), - }, - }, - - { - name: "return an ErrDimensionLimitExceed error when current is 0 and limit is 0", - args: args{ - current: 0, - limit: 0, - }, - want: want{ - want: New("supported dimension limit exceed:\trequired = 0,\tlimit = 0"), - }, - }, - { - name: "return an ErrDimensionLimitExceed error when current and limit are the minimum value of int", - args: args{ - current: int(math.MinInt64), - limit: int(math.MinInt64), - }, - want: want{ - want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MinInt64), int(math.MinInt64)), - }, - }, - { - name: "return an ErrDimensionLimitExceed error when current and limit are the maximum value of int", - args: args{ - current: int(math.MaxInt64), - limit: int(math.MaxInt64), - }, - want: want{ - want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MaxInt64), int(math.MaxInt64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrDimensionLimitExceed(test.args.current, test.args.limit) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrIncompatibleDimensionSize(t *testing.T) { - type args struct { - req int - dim int - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrIncompatibleDimensionSize error when req is 640 and dim is 720", - args: args{ - req: 640, - dim: 720, - }, - want: want{ - want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 720"), - }, - }, - { - name: "return an ErrIncompatibleDimensionSize error when req is empty and dim is 720", - args: args{ - dim: 720, - }, - want: 
want{ - want: New("incompatible dimension size detected\trequested: 0,\tconfigured: 720"), - }, - }, - { - name: "return an ErrIncompatibleDimensionSize error when req is 640", - args: args{ - req: 640, - }, - want: want{ - want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 0"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrIncompatibleDimensionSize(test.args.req, test.args.dim) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUnsupportedObjectType(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUnsupportedObjectType error", - want: want{ - want: New("unsupported ObjectType"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUnsupportedObjectType - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUnsupportedDistanceType(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUnsupportedDistanceType error", - want: want{ - want: New("unsupported DistanceType"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUnsupportedDistanceType - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetDistanceType(t *testing.T) { - type args struct { - err error - distance string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is l2", - args: args{ - err: New("ngt error"), - distance: "l2", - }, - want: want{ - want: New("failed to set distance type l2: ngt error"), - }, - }, - { - name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is empty", - 
args: args{ - err: New("ngt error"), - distance: "", - }, - want: want{ - want: New("failed to set distance type : ngt error"), - }, - }, - { - name: "return an ErrFailedToSetDistanceType error when err is nil and distance is cos", - args: args{ - err: nil, - distance: "cos", - }, - want: want{ - want: New("failed to set distance type cos"), - }, - }, - { - name: "return an ErrFailedToSetDistanceType error when err is nil and distance is empty", - args: args{ - err: nil, - distance: "", - }, - want: want{ - want: New("failed to set distance type "), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetDistanceType(test.args.err, test.args.distance) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetObjectType(t *testing.T) { - type args struct { - err error - t string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is Float", - args: args{ - err: New("ngt error"), - t: "Float", - }, - want: want{ - want: New("failed to set object type Float: ngt error"), - }, - }, - { - name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is empty", - args: args{ - err: New("ngt error"), - t: "", - }, - want: want{ - want: New("failed to set object type : ngt error"), - }, - }, - { - name: "return an ErrFailedToSetObjectType error when err is nil and t is Int", - args: args{ - err: nil, - t: "Int", - }, - want: want{ - want: New("failed to set object type Int"), - }, - }, - { - name: "return an ErrFailedToSetObjectType error when err is nil and t is empty", - args: args{ - err: nil, - t: "", - }, - want: want{ - want: New("failed to set object type "), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetObjectType(test.args.err, test.args.t) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetDimension(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetDimension error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to set dimension: ngt error"), - }, - }, - { - name: "return an 
ErrFailedToSetDimension error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to set dimension"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetDimension(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetCreationEdgeSize(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetCreationEdgeSize error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to set creation edge size: ngt error"), - }, - }, - { - name: "return an ErrFailedToSetCreationEdgeSize error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to set creation edge size"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetCreationEdgeSize(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetSearchEdgeSize(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetSearchEdgeSize error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to set search edge size: ngt error"), - }, - }, - { - name: "return an ErrFailedToSetSearchEdgeSize error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to set search edge size"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetSearchEdgeSize(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUncommittedIndexExists(t *testing.T) { - type args struct { - num uint64 - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - 
defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUncommittedIndexExists error when num is 100", - args: args{ - num: 100, - }, - want: want{ - want: New("100 indexes are not committed"), - }, - }, - - { - name: "return an ErrUncommittedIndexExists error when num is 0", - args: args{ - num: 0, - }, - want: want{ - want: New("0 indexes are not committed"), - }, - }, - { - name: "return an ErrUncommittedIndexExists error when num is the maximum value of uint64", - args: args{ - num: math.MaxUint64, - }, - want: want{ - want: Errorf("%d indexes are not committed", uint(math.MaxUint64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUncommittedIndexExists(test.args.num) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUncommittedIndexNotFound(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUncommittedIndexNotFound error", - want: want{ - want: New("uncommitted indexes are not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUncommittedIndexNotFound - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrCAPINotImplemented(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrCAPINotImplemented error", - want: want{ - want: New("not implemented in C API"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrCAPINotImplemented - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUUIDAlreadyExists(t *testing.T) { - type args struct { - uuid string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", 
got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUUIDAlreadyExists error when uuid is 550e8400-e29b-41d4", - args: args{ - uuid: "550e8400-e29b-41d4", - }, - want: want{ - want: New("ngt uuid 550e8400-e29b-41d4 index already exists"), - }, - }, - { - name: "return an ErrUUIDAlreadyExists error when uuid is empty", - args: args{ - uuid: "", - }, - want: want{ - want: New("ngt uuid index already exists"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUUIDAlreadyExists(test.args.uuid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUUIDNotFound(t *testing.T) { - type args struct { - id uint32 - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUUIDNotFound error when id is 1234", - args: args{ - id: 1234, - }, - want: want{ - want: New("ngt object uuid 1234's metadata not found"), - }, - }, - { - name: "return an ErrUUIDNotFound error when id is the maximum value of uint32", - args: args{ - id: math.MaxUint32, - }, - want: want{ - want: Errorf("ngt object uuid %d's metadata not found", math.MaxUint32), - }, - }, - { - name: "return an ErrUUIDNotFound error when id is 0", - args: args{ - id: 0, - }, - want: want{ - want: New("ngt object uuid not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUUIDNotFound(test.args.id) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrObjectIDNotFound(t *testing.T) { - type args struct { - uuid string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrObjectIDNotFound error when uuid is 550e8400-e29b-41d4.", - args: args{ - uuid: "550e8400-e29b-41d4", - }, - want: want{ - want: New("ngt uuid 550e8400-e29b-41d4's object id not found"), - }, - }, - { - name: "return an ErrObjectIDNotFound error when uuid is empty.", - args: args{ - uuid: "", - }, - want: want{ - want: New("ngt uuid 's object id not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := 
ErrObjectIDNotFound(test.args.uuid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrRemoveRequestedBeforeIndexing(t *testing.T) { - type args struct { - oid uint - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 100", - args: args{ - oid: 100, - }, - want: want{ - want: New("object id 100 is not indexed we cannot remove it"), - }, - }, - { - name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 0", - args: args{ - oid: 0, - }, - want: want{ - want: New("object id 0 is not indexed we cannot remove it"), - }, - }, - { - name: "return an ErrRemoveRequestedBeforeIndexing error when oid is maximum value of uint", - args: args{ - oid: uint(math.MaxUint64), - }, - want: want{ - want: Errorf("object id %d is not indexed we cannot remove it", uint(math.MaxUint64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrRemoveRequestedBeforeIndexing(test.args.oid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - // NOT IMPLEMENTED BELOW // // func TestNewNGTError(t *testing.T) { diff --git a/internal/errors/option_test.go b/internal/errors/option_test.go index 8fba808dde..f5c904bbdc 100644 --- a/internal/errors/option_test.go +++ b/internal/errors/option_test.go @@ -50,7 +50,7 @@ func TestNewErrInvalidOption(t *testing.T) { name := "WithPort" val := 9000 return test{ - name: "return ErrInvalidOpton when name and val have a value and errs is empty.", + name: "return ErrInvalidOption when name and val have a value and errs is empty.", args: args{ name: name, val: val, @@ -70,7 +70,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := errs[0] return test{ - name: "return ErrInvalidOpton when all of parameter has value.", + name: "return ErrInvalidOption when all of parameter has value.", args: args{ name: name, val: val, @@ -93,7 +93,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := errs[1] return test{ - name: "return ErrInvalidOpton when all of parameter has value and errs has nil as value.", + name: "return ErrInvalidOption when all of parameter has value and errs has nil as value.", args: args{ name: name, val: val, @@ -115,7 +115,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := Wrap(errs[1], errs[0].Error()) return test{ - name: "return ErrInvalidOpton when name is nil and val and errs have values.", + name: "return ErrInvalidOption when name is nil and val and errs have values.", args: args{ val: val, errs: errs, @@ -136,7 +136,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := Wrap(errs[1], errs[0].Error()) return test{ - name: "return ErrInvalidOpton when val is nil and name and errs have values.", + name: "return ErrInvalidOption when val is nil and name and errs have values.", args: args{ name: name, errs: errs, diff --git a/internal/errors/redis.go 
b/internal/errors/redis.go index c27b47af56..99fd542132 100644 --- a/internal/errors/redis.go +++ b/internal/errors/redis.go @@ -21,7 +21,7 @@ var ( // ErrRedisInvalidKVVKPrefix represents a function to generate an error that kv index and vk prefix are invalid. ErrRedisInvalidKVVKPrefix = func(kv, vk string) error { - return Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", kv, vk) + return Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", kv, vk) } // ErrRedisNotFoundIdentity generates an RedisNotFoundIdentityError error. diff --git a/internal/errors/redis_test.go b/internal/errors/redis_test.go index c399adf6e7..8355f9fba3 100644 --- a/internal/errors/redis_test.go +++ b/internal/errors/redis_test.go @@ -24,7 +24,7 @@ import ( "github.com/vdaas/vald/internal/test/goleak" ) -func TestErrRedisInvalidKVVKPrefic(t *testing.T) { +func TestErrRedisInvalidKVVKPrefix(t *testing.T) { type fields struct { kv string vk string @@ -56,7 +56,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { vk: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", str, str), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", str, str), }, } }(), @@ -67,7 +67,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { kv: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", str, ""), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", str, ""), }, } }(), @@ -78,7 +78,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { vk: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", "", str), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", "", str), }, } }(), @@ -87,7 +87,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { name: "return an ErrRedisInvalidKVVKPrefix error when kv and vk are empty", fields: fields{}, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", "", ""), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", "", ""), }, } }(), @@ -167,7 +167,7 @@ func TestErrRedisNotFoundIdentity(t *testing.T) { } } -func TestErrRdisNotFound(t *testing.T) { +func TestErrRedisNotFound(t *testing.T) { type fields struct { key string } @@ -304,7 +304,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -316,7 +316,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -327,7 +327,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -338,7 +338,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to fetch key ()"), @@ -396,7 +396,7 @@ 
func TestErrRedisSetOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -408,7 +408,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -419,7 +419,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -430,7 +430,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to set key ()"), @@ -488,7 +488,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -500,7 +500,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -511,7 +511,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -522,7 +522,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to delete key ()"), diff --git a/internal/errors/tls.go b/internal/errors/tls.go index 00357926c9..e714347ddf 100644 --- a/internal/errors/tls.go +++ b/internal/errors/tls.go @@ -20,10 +20,10 @@ package errors var ( // TLS. - // ErrTLSDisabled is error variable, it's replesents config error that tls is disabled by config. + // ErrTLSDisabled is error variable, it represents config error that tls is disabled by config. ErrTLSDisabled = New("tls feature is disabled") - // ErrTLSCertOrKeyNotFound is error variable, it's replesents tls cert or key not found error. + // ErrTLSCertOrKeyNotFound is error variable, it represents tls cert or key not found error. ErrTLSCertOrKeyNotFound = New("cert or key file path not found") ErrCertificationFailed = New("certification failed") diff --git a/internal/errors/vald.go b/internal/errors/vald.go index 949489a0b7..7ba363fcde 100644 --- a/internal/errors/vald.go +++ b/internal/errors/vald.go @@ -25,7 +25,7 @@ var ( // ErrSameVectorAlreadyExists represents an error that vald already has same features vector data.
ErrSameVectorAlreadyExists = func(meta string, n, o []float32) error { - return Errorf("vald metadata:\t%s\talready exists reqested: %v, stored: %v", meta, n, o) + return Errorf("vald metadata:\t%s\talready exists requested: %v, stored: %v", meta, n, o) } // ErrMetaDataCannotFetch represents an error that vald metadata cannot fetch. diff --git a/internal/info/info.go b/internal/info/info.go index 6f5f31e0f9..223a507b07 100644 --- a/internal/info/info.go +++ b/internal/info/info.go @@ -280,7 +280,7 @@ func (d Detail) String() string { return "\n" + strings.Join(strs, "\n") } -// Get returns parased Detail object. +// Get returns parsed Detail object. func (i *info) Get() Detail { i.prepare() return i.getDetail() diff --git a/internal/log/level/level.go b/internal/log/level/level.go index e4be79e98b..57ae0dae15 100644 --- a/internal/log/level/level.go +++ b/internal/log/level/level.go @@ -56,20 +56,44 @@ func (l Level) String() string { } func Atol(str string) Level { - str = strings.ToUpper(str) - for i := len(str); i > 0; i-- { - switch str[:i] { - case DEBUG.String(), "DEB", "DEBG", "DB", "DBG", "D": - return DEBUG - case INFO.String(), "IFO", "INF", "IF", "IN", "I": - return INFO - case WARN.String(), "WARNING", "WAR", "WRN", "WN", "W": - return WARN - case ERROR.String(), "ERROR", "ERRO", "ER", "ERR", "E": - return ERROR - case FATAL.String(), "FATA", "FAT", "FT", "FL", "F": - return FATAL - } + l, ok := map[string]Level{ + DEBUG.String(): DEBUG, + DEBUG.String() + "S": DEBUG, + "D": DEBUG, + "DB": DEBUG, + "DBG": DEBUG, + "DEB": DEBUG, + "DEBG": DEBUG, + INFO.String(): INFO, + INFO.String() + "S": INFO, + "I": INFO, + "IF": INFO, + "IFO": INFO, + "IN": INFO, + "INF": INFO, + WARN.String(): WARN, + WARN.String() + "S": WARN, + "W": WARN, + "WAR": WARN, + "WARNING": WARN, + "WN": WARN, + "WRN": WARN, + ERROR.String(): ERROR, + ERROR.String() + "S": ERROR, + "E": ERROR, + "ER": ERROR, + "ERR": ERROR, + "ERRO": ERROR, + FATAL.String(): FATAL, + FATAL.String() + "S": FATAL, + "F": FATAL, + "FAT": FATAL, + "FATA": FATAL, + "FL": FATAL, + "FT": FATAL, + }[strings.ToUpper(str)] + if ok { + return l } return Unknown } diff --git a/internal/log/option_test.go b/internal/log/option_test.go index 62bf9511fd..64a9ebecf3 100644 --- a/internal/log/option_test.go +++ b/internal/log/option_test.go @@ -148,7 +148,7 @@ func TestWithLoggerType(t *testing.T) { { name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), @@ -227,7 +227,7 @@ func TestWithLevel(t *testing.T) { { name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), @@ -312,7 +312,7 @@ func TestWithFormat(t *testing.T) { return test{ name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), diff --git a/internal/net/dialer_test.go b/internal/net/dialer_test.go index 3e366b1f22..22183ede9e 100644 --- a/internal/net/dialer_test.go +++ b/internal/net/dialer_test.go @@ -1163,7 +1163,7 @@ func Test_dialer_cachedDialer(t *testing.T) { // check the connection made on the same port _, p, _ := net.SplitHostPort(gotConn.RemoteAddr().String()) if p != strconv.Itoa(int(port)) { - return errors.Errorf("unexcepted port number, except: %d, got: %s", port, p) + return errors.Errorf("unexpected port number, except: %d, got: %s", port, p) } // read the output from the server and check if it is equals to the count diff --git 
a/internal/net/grpc/interceptor/client/metric/metric.go b/internal/net/grpc/interceptor/client/metric/metric.go index 7bcec5833f..2530fd06c8 100644 --- a/internal/net/grpc/interceptor/client/metric/metric.go +++ b/internal/net/grpc/interceptor/client/metric/metric.go @@ -40,7 +40,7 @@ const ( func ClientMetricInterceptors() (grpc.UnaryClientInterceptor, grpc.StreamClientInterceptor, error) { meter := metrics.GetMeter() - latencyHistgram, err := meter.Float64Histogram( + latencyHistogram, err := meter.Float64Histogram( latencyMetricsName, metrics.WithDescription("Client latency in milliseconds, by method"), metrics.WithUnit(metrics.Milliseconds), @@ -60,7 +60,7 @@ func ClientMetricInterceptors() (grpc.UnaryClientInterceptor, grpc.StreamClientI record := func(ctx context.Context, method string, err error, latency float64) { attrs := attributesFromError(method, err) - latencyHistgram.Record(ctx, latency, metrics.WithAttributes(attrs...)) + latencyHistogram.Record(ctx, latency, metrics.WithAttributes(attrs...)) completedRPCCnt.Add(ctx, 1, metrics.WithAttributes(attrs...)) } return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { diff --git a/internal/net/grpc/interceptor/server/metric/metric.go b/internal/net/grpc/interceptor/server/metric/metric.go index 574386d483..d0bba7e075 100644 --- a/internal/net/grpc/interceptor/server/metric/metric.go +++ b/internal/net/grpc/interceptor/server/metric/metric.go @@ -36,7 +36,7 @@ const ( func MetricInterceptors() (grpc.UnaryServerInterceptor, grpc.StreamServerInterceptor, error) { meter := metrics.GetMeter() - latencyHistgram, err := meter.Float64Histogram( + latencyHistogram, err := meter.Float64Histogram( latencyMetricsName, metrics.WithDescription("Server latency in milliseconds, by method"), metrics.WithUnit(metrics.Milliseconds), @@ -56,7 +56,7 @@ func MetricInterceptors() (grpc.UnaryServerInterceptor, grpc.StreamServerInterce record := func(ctx context.Context, method string, err error, latency float64) { attrs := attributesFromError(method, err) - latencyHistgram.Record(ctx, latency, metrics.WithAttributes(attrs...)) + latencyHistogram.Record(ctx, latency, metrics.WithAttributes(attrs...)) completedRPCCnt.Add(ctx, 1, metrics.WithAttributes(attrs...)) } return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { diff --git a/internal/net/http/json/json_test.go b/internal/net/http/json/json_test.go index a0f3dbef24..d12189dcf9 100644 --- a/internal/net/http/json/json_test.go +++ b/internal/net/http/json/json_test.go @@ -377,7 +377,7 @@ func TestErrorHandler(t *testing.T) { } if got, want := w.Code, http.StatusInternalServerError; got != want { - return errors.Errorf("reso code not equals. want: %v, got: %v", http.StatusInternalServerError, got) + return errors.Errorf("response code not equals. 
want: %v, got: %v", http.StatusInternalServerError, got) } return nil }, @@ -666,7 +666,7 @@ func TestRequest(t *testing.T) { ctx context.Context method string url string - payloyd any + payload any data any } type want struct { @@ -694,7 +694,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "@", url: "/", - payloyd: nil, + payload: nil, data: nil, }, want: want{ @@ -709,7 +709,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: "/", - payloyd: 1 + 3i, + payload: 1 + 3i, data: new(any), }, checkFunc: func(w want, err error) error { @@ -730,7 +730,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: "/", - payloyd: "1", + payload: "1", data: new(any), }, want: want{ @@ -753,7 +753,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: srv.URL, - payloyd: "1", + payload: "1", data: &got, }, want: want{ @@ -791,7 +791,7 @@ func TestRequest(t *testing.T) { checkFunc = defaultCheckFunc } - err := Request(test.args.ctx, test.args.method, test.args.url, test.args.payloyd, test.args.data) + err := Request(test.args.ctx, test.args.method, test.args.url, test.args.payload, test.args.data) if err := checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } diff --git a/internal/params/params_test.go b/internal/params/params_test.go index 70fabbb945..1ddf089134 100644 --- a/internal/params/params_test.go +++ b/internal/params/params_test.go @@ -285,7 +285,7 @@ func Test_parser_Parse(t *testing.T) { beforeFunc: func(t *testing.T) { t.Helper() os.Args = []string{ - "test", "--path=config.yml", + "test", "--path=config.yaml", } }, afterFunc: func(t *testing.T) { @@ -296,7 +296,7 @@ func Test_parser_Parse(t *testing.T) { want1: true, err: &os.PathError{ Op: "stat", - Path: "config.yml", + Path: "config.yaml", Err: syscall.Errno(0x2), }, }, diff --git a/internal/servers/server/option_test.go b/internal/servers/server/option_test.go index 9e7aeda2bf..10b6d16cf7 100644 --- a/internal/servers/server/option_test.go +++ b/internal/servers/server/option_test.go @@ -2826,6 +2826,771 @@ func TestDefaultHealthServerOption(t *testing.T) { // } // } // +// func TestWithHTTP2Enabled(t *testing.T) { +// type args struct { +// enabled bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// enabled:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// enabled:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, 
goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithHTTP2Enabled(test.args.enabled) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithHandlerLimit(t *testing.T) { +// type args struct { +// size int +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithHandlerLimit(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithPermitProhibitedCipherSuites(t *testing.T) { +// type args struct { +// perm bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// perm:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// perm:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// 
defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithPermitProhibitedCipherSuites(test.args.perm) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxUploadBufferPerConnection(t *testing.T) { +// type args struct { +// size int32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxUploadBufferPerConnection(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxUploadBufferPerStream(t *testing.T) { +// type args struct { +// size int32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc 
+// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxUploadBufferPerStream(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxConcurrentStreams(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxConcurrentStreams(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxDecoderHeaderTableSize(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// 
+// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxDecoderHeaderTableSize(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxEncoderHeaderTableSize(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxEncoderHeaderTableSize(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxReadFrameSize(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// 
t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxReadFrameSize(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func TestWithGRPCKeepaliveMinTime(t *testing.T) { // type args struct { // min string diff --git a/internal/tls/tls.go b/internal/tls/tls.go index 64811db689..7b8869a2f4 100644 --- a/internal/tls/tls.go +++ b/internal/tls/tls.go @@ -49,7 +49,7 @@ var ( // NewTLSConfig returns a *tls.Config struct or error // This function read TLS configuration and initialize *tls.Config struct. // This function initialize TLS configuration, for example the CA certificate and key to start TLS server. -// Server and CA Certificate, and private key will read from a file from the file path definied in environment variable. +// Server and CA Certificate, and private key will read from a file from the file path defined in environment variable. func New(opts ...Option) (*Config, error) { c, err := newCredential(opts...) if err != nil { diff --git a/internal/worker/queue.go b/internal/worker/queue.go index 0ec5a7c68c..3abf9d89c0 100644 --- a/internal/worker/queue.go +++ b/internal/worker/queue.go @@ -64,8 +64,8 @@ func NewQueue(opts ...QueueOption) (Queue, error) { return q, nil } -// Start starts execute queueing if queue is not runnnig. -// If queue is already reunning, it returns error. +// Start starts execute queueing if queue is not running. +// If queue is already running, it returns error. // It returns the error channel that the queueing job return. func (q *queue) Start(ctx context.Context) (<-chan error, error) { if q.isRunning() { @@ -132,7 +132,7 @@ func (q *queue) Push(ctx context.Context, job JobFunc) error { } } -// Pop returns (JobFunc, nil) if the channnel, which will be used for queuing job, contains JobFunc. +// Pop returns (JobFunc, nil) if the channel, which will be used for queuing job, contains JobFunc. // It returns (nil ,error) if it failed to pop from the job queue. func (q *queue) Pop(ctx context.Context) (JobFunc, error) { tryCnt := int(q.Len()) + 1 // include the first try diff --git a/internal/worker/queue_option.go b/internal/worker/queue_option.go index 7b432e747a..a2be4b4c14 100644 --- a/internal/worker/queue_option.go +++ b/internal/worker/queue_option.go @@ -52,7 +52,7 @@ func WithQueueErrGroup(eg errgroup.Group) QueueOption { } // WithQueueCheckDuration returns the option to set the qcdur for queue. -// If dur is invalid string, it returns errror. +// If dur is invalid string, it returns error. 
func WithQueueCheckDuration(dur string) QueueOption { return func(q *queue) error { if len(dur) == 0 { diff --git a/k8s/metrics/loki/promtail.yaml b/k8s/metrics/loki/promtail.yaml index 75f7d5578d..cfbf993c9d 100644 --- a/k8s/metrics/loki/promtail.yaml +++ b/k8s/metrics/loki/promtail.yaml @@ -33,7 +33,7 @@ spec: spec: containers: - args: - -config.file=/etc/promtail/promtail.yml + - -config.file=/etc/promtail/promtail.yaml env: - name: HOSTNAME valueFrom: @@ -120,7 +120,7 @@ kind: ConfigMap metadata: name: promtail data: - promtail.yml: | + promtail.yaml: | clients: - external_labels: cluster: vald diff --git a/pkg/agent/core/faiss/service/faiss.go b/pkg/agent/core/faiss/service/faiss.go index a5b11e4f7c..737a31b080 100644 --- a/pkg/agent/core/faiss/service/faiss.go +++ b/pkg/agent/core/faiss/service/faiss.go @@ -42,6 +42,7 @@ import ( "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/pkg/agent/internal/kvs" + "github.com/vdaas/vald/pkg/agent/internal/memstore" "github.com/vdaas/vald/pkg/agent/internal/metadata" "github.com/vdaas/vald/pkg/agent/internal/vqueue" ) @@ -49,6 +50,7 @@ type ( Faiss interface { Start(ctx context.Context) <-chan error - Train(nb int, xb []float32) error - Insert(uuid string, xb []float32) error - InsertWithTime(uuid string, vec []float32, t int64) error @@ -60,12 +62,27 @@ type ( - Search(k, nq uint32, xq []float32) (*payload.Search_Response, error) - Delete(uuid string) error - DeleteWithTime(uuid string, t int64) error + Search(k, nprobe, nq uint32, xq []float32) (*payload.Search_Response, error) + Insert(uuid string, vec []float32) (err error) + InsertWithTime(uuid string, vec []float32, t int64) (err error) + Update(uuid string, vec []float32) (err error) + UpdateWithTime(uuid string, vec []float32, t int64) (err error) + UpdateTimestamp(uuid string, ts int64, force bool) (err error) + Delete(uuid string) (err error) + DeleteWithTime(uuid string, t int64) (err error) Exists(uuid string) (uint32, bool) + CreateIndex(ctx context.Context) (err error) + SaveIndex(ctx context.Context) (err error) + CreateAndSaveIndex(ctx context.Context) (err error) + Train(nb int, vec []float32) (err error) IsIndexing() bool IsSaving() bool + Len() uint64 NumberOfCreateIndexExecution() uint64 NumberOfProactiveGCExecution() uint64 - Len() uint64 + UUIDs(context.Context) (uuids []string) InsertVQueueBufferLen() uint64 DeleteVQueueBufferLen() uint64 GetDimensionSize() int @@ -721,6 +738,10 @@ func (f *faiss) update(uuid string, vec []float32, t int64) (err error) { return f.insert(uuid, vec, t, false) } +func (f *faiss) UpdateTimestamp(uuid string, ts int64, force bool) (err error) { + return memstore.UpdateTimestamp(f.kvs, f.vq, uuid, ts, force, nil) +} + func (f *faiss) readyForUpdate(uuid string, vec []float32) (err error) { if len(uuid) == 0 { return errors.ErrUUIDNotFound(0) } @@ -915,7 +936,7 @@ func (f *faiss) saveIndex(ctx context.Context) error { // no cleanup invalid index eg, ectx := errgroup.New(ctx) - // we want to ensure the acutal kvs size between kvsdb and metadata, + // we want to ensure the actual kvs size between kvsdb and metadata, // so we create this counter to count the actual kvs size instead of using kvs.Len() var ( kvsLen uint64 @@ -931,10 +952,14 @@ func (f *faiss) saveIndex(ctx context.Context) error { f.smu.Lock() defer f.smu.Unlock() - eg.Go(safety.RecoverFunc(func() (err error) { - if f.kvs.Len() > 0 && path != "" { + if 
f.kvs.Len() > 0 && path != "" { + eg.Go(safety.RecoverFunc(func() (err error) { m := make(map[string]uint32, f.Len()) mt := make(map[string]int64, f.Len()) + defer func() { + m = nil + mt = nil + }() var mu sync.Mutex f.kvs.Range(ectx, func(key string, id uint32, ts int64) bool { @@ -1007,10 +1032,10 @@ func (f *faiss) saveIndex(ctx context.Context) error { } mt = make(map[string]int64) - } - return nil - })) + return nil + })) + } eg.Go(safety.RecoverFunc(func() (err error) { f.fmu.Lock() @@ -1031,7 +1056,7 @@ func (f *faiss) saveIndex(ctx context.Context) error { if fi != nil { derr := fi.Close() if derr != nil { - err = errors.Wrap(err, derr.Error()) + err = errors.Join(err, derr) } } }() @@ -1043,7 +1068,6 @@ func (f *faiss) saveIndex(ctx context.Context) error { if err != nil { return err } - err = fi.Sync() if err != nil { return err @@ -1161,7 +1185,8 @@ func (f *faiss) delete(uuid string, t int64, validation bool) error { if validation { _, _, ok := f.kvs.Get(uuid) - if !ok && !f.vq.IVExists(uuid) { + _, ivqok := f.vq.IVExists(uuid) + if !ok && !ivqok { return errors.ErrObjectIDNotFound(uuid) } } @@ -1169,27 +1194,12 @@ func (f *faiss) delete(uuid string, t int64, validation bool) error { return f.vq.PushDelete(uuid, t) } -func (f *faiss) Exists(uuid string) (uint32, bool) { - var ( - oid uint32 - ok bool - ) - - ok = f.vq.IVExists(uuid) - if !ok { - oid, _, ok = f.kvs.Get(uuid) - if !ok { - log.Debugf("Exists\tuuid: %s's data not found in kvsdb and insert vqueue\terror: %v", uuid, errors.ErrObjectIDNotFound(uuid)) - return 0, false - } - if f.vq.DVExists(uuid) { - log.Debugf("Exists\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. the object will be delete soon\terror: %v", - uuid, errors.ErrObjectIDNotFound(uuid)) - return 0, false - } - } +func (f *faiss) Exists(uuid string) (oid uint32, ok bool) { + return memstore.Exists(f.kvs, f.vq, uuid) +} - return oid, ok +func (f *faiss) GetObject(uuid string) (vec []float32, timestamp int64, err error) { + return memstore.GetObject(f.kvs, f.vq, uuid, nil) } func (f *faiss) IsIndexing() bool { @@ -1202,6 +1212,10 @@ func (f *faiss) IsSaving() bool { return s && ok } +func (f *faiss) UUIDs(ctx context.Context) (uuids []string) { + return memstore.UUIDs(ctx, f.kvs, f.vq) +} + func (f *faiss) NumberOfCreateIndexExecution() uint64 { return atomic.LoadUint64(&f.nocie) } @@ -1237,8 +1251,20 @@ func (f *faiss) GetTrainSize() int { return f.trainSize } -func (f *faiss) Close(ctx context.Context) error { - err := f.kvs.Close() +func (f *faiss) Close(ctx context.Context) (err error) { + defer f.core.Close() + defer func() { + kerr := f.kvs.Close() + if kerr != nil && + !errors.Is(err, context.Canceled) && + !errors.Is(err, context.DeadlineExceeded) { + if err != nil { + err = errors.Join(kerr, err) + } else { + err = kerr + } + } + }() if len(f.path) != 0 { cerr := f.CreateIndex(ctx) if cerr != nil && @@ -1246,28 +1272,33 @@ func (f *faiss) Close(ctx context.Context) error { !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { if err != nil { - err = errors.Wrap(cerr, err.Error()) + err = errors.Join(cerr, err) } else { err = cerr } } - serr := f.SaveIndex(ctx) if serr != nil && !errors.Is(err, errors.ErrUncommittedIndexNotFound) && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { if err != nil { - err = errors.Wrap(serr, err.Error()) + err = errors.Join(serr, err) } else { err = serr } } } + return err +} - f.core.Close() - - return nil +// 
ListObjectFunc applies the input function on each index stored in the kvs and vqueue. +// Use this function for performing something on each object with caring about the memory usage. +// If the vector exists in the vqueue, this vector is not indexed so the oid(object ID) is processed as 0. +func (f *faiss) ListObjectFunc( + ctx context.Context, fn func(uuid string, oid uint32, ts int64) bool, +) { + memstore.ListObjectFunc(ctx, f.kvs, f.vq, fn) } func (f *faiss) toSearchResponse( diff --git a/pkg/agent/core/faiss/service/faiss_test.go b/pkg/agent/core/faiss/service/faiss_test.go index 4911a58c52..8d730a5d3e 100644 --- a/pkg/agent/core/faiss/service/faiss_test.go +++ b/pkg/agent/core/faiss/service/faiss_test.go @@ -3001,10 +3001,11 @@ package service // } // } // -// func Test_faiss_readyForUpdate(t *testing.T) { +// func Test_faiss_UpdateTimestamp(t *testing.T) { // type args struct { -// uuid string -// vec []float32 +// uuid string +// ts int64 +// force bool // } // type fields struct { // core core.Faiss @@ -3069,7 +3070,8 @@ package service // name: "test_case_1", // args: args { // uuid:"", -// vec:nil, +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -3127,7 +3129,8 @@ package service // name: "test_case_2", // args: args { // uuid:"", -// vec:nil, +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -3234,7 +3237,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.readyForUpdate(test.args.uuid, test.args.vec) +// err := f.UpdateTimestamp(test.args.uuid, test.args.ts, test.args.force) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3242,9 +3245,10 @@ package service // } // } // -// func Test_faiss_CreateIndex(t *testing.T) { +// func Test_faiss_readyForUpdate(t *testing.T) { // type args struct { -// ctx context.Context +// uuid string +// vec []float32 // } // type fields struct { // core core.Faiss @@ -3308,7 +3312,8 @@ package service // { // name: "test_case_1", // args: args { -// ctx:nil, +// uuid:"", +// vec:nil, // }, // fields: fields { // core:nil, @@ -3365,7 +3370,8 @@ package service // return test { // name: "test_case_2", // args: args { -// ctx:nil, +// uuid:"", +// vec:nil, // }, // fields: fields { // core:nil, @@ -3472,7 +3478,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.CreateIndex(test.args.ctx) +// err := f.readyForUpdate(test.args.uuid, test.args.vec) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3480,7 +3486,7 @@ package service // } // } // -// func Test_faiss_SaveIndex(t *testing.T) { +// func Test_faiss_CreateIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -3710,7 +3716,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.SaveIndex(test.args.ctx) +// err := f.CreateIndex(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3718,7 +3724,7 @@ package service // } // } // -// func Test_faiss_saveIndex(t *testing.T) { +// func Test_faiss_SaveIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -3948,7 +3954,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.saveIndex(test.args.ctx) +// err := f.SaveIndex(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3956,7 +3962,7 @@ package service // } // } // -// 
func Test_faiss_moveAndSwitchSavedData(t *testing.T) { +// func Test_faiss_saveIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -4186,7 +4192,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.moveAndSwitchSavedData(test.args.ctx) +// err := f.saveIndex(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -4194,7 +4200,7 @@ package service // } // } // -// func Test_faiss_CreateAndSaveIndex(t *testing.T) { +// func Test_faiss_moveAndSwitchSavedData(t *testing.T) { // type args struct { // ctx context.Context // } @@ -4424,7 +4430,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.CreateAndSaveIndex(test.args.ctx) +// err := f.moveAndSwitchSavedData(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -4432,11 +4438,9 @@ package service // } // } // -// func Test_faiss_Search(t *testing.T) { +// func Test_faiss_CreateAndSaveIndex(t *testing.T) { // type args struct { -// k uint32 -// nq uint32 -// xq []float32 +// ctx context.Context // } // type fields struct { // core core.Faiss @@ -4477,25 +4481,21 @@ package service // kvsdbConcurrency int // } // type want struct { -// wantRes *payload.Search_Response -// err error +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, *payload.Search_Response, error) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotRes, w.wantRes) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) -// } // return nil // } // tests := []test{ @@ -4504,9 +4504,7 @@ package service // { // name: "test_case_1", // args: args { -// k:0, -// nq:0, -// xq:nil, +// ctx:nil, // }, // fields: fields { // core:nil, @@ -4563,9 +4561,7 @@ package service // return test { // name: "test_case_2", // args: args { -// k:0, -// nq:0, -// xq:nil, +// ctx:nil, // }, // fields: fields { // core:nil, @@ -4672,17 +4668,20 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// gotRes, err := f.Search(test.args.k, test.args.nq, test.args.xq) -// if err := checkFunc(test.want, gotRes, err); err != nil { +// err := f.CreateAndSaveIndex(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_Delete(t *testing.T) { +// func Test_faiss_Search(t *testing.T) { // type args struct { -// uuid string +// k uint32 +// nprobe uint32 +// nq uint32 +// xq []float32 // } // type fields struct { // core core.Faiss @@ -4723,21 +4722,25 @@ package service // kvsdbConcurrency int // } // type want struct { -// err error +// wantRes *payload.Search_Response +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want, *payload.Search_Response, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { +// defaultCheckFunc := func(w want, 
gotRes *payload.Search_Response, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } // return nil // } // tests := []test{ @@ -4746,7 +4749,10 @@ package service // { // name: "test_case_1", // args: args { -// uuid:"", +// k:0, +// nprobe:0, +// nq:0, +// xq:nil, // }, // fields: fields { // core:nil, @@ -4803,7 +4809,10 @@ package service // return test { // name: "test_case_2", // args: args { -// uuid:"", +// k:0, +// nprobe:0, +// nq:0, +// xq:nil, // }, // fields: fields { // core:nil, @@ -4910,18 +4919,17 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.Delete(test.args.uuid) -// if err := checkFunc(test.want, err); err != nil { +// gotRes, err := f.Search(test.args.k, test.args.nprobe, test.args.nq, test.args.xq) +// if err := checkFunc(test.want, gotRes, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_DeleteWithTime(t *testing.T) { +// func Test_faiss_Delete(t *testing.T) { // type args struct { // uuid string -// t int64 // } // type fields struct { // core core.Faiss @@ -4986,7 +4994,6 @@ package service // name: "test_case_1", // args: args { // uuid:"", -// t:0, // }, // fields: fields { // core:nil, @@ -5044,7 +5051,6 @@ package service // name: "test_case_2", // args: args { // uuid:"", -// t:0, // }, // fields: fields { // core:nil, @@ -5151,7 +5157,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.DeleteWithTime(test.args.uuid, test.args.t) +// err := f.Delete(test.args.uuid) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -5159,11 +5165,10 @@ package service // } // } // -// func Test_faiss_delete(t *testing.T) { +// func Test_faiss_DeleteWithTime(t *testing.T) { // type args struct { -// uuid string -// t int64 -// validation bool +// uuid string +// t int64 // } // type fields struct { // core core.Faiss @@ -5229,7 +5234,6 @@ package service // args: args { // uuid:"", // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -5288,7 +5292,6 @@ package service // args: args { // uuid:"", // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -5395,7 +5398,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.delete(test.args.uuid, test.args.t, test.args.validation) +// err := f.DeleteWithTime(test.args.uuid, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -5403,9 +5406,11 @@ package service // } // } // -// func Test_faiss_Exists(t *testing.T) { +// func Test_faiss_delete(t *testing.T) { // type args struct { -// uuid string +// uuid string +// t int64 +// validation bool // } // type fields struct { // core core.Faiss @@ -5446,24 +5451,20 @@ package service // kvsdbConcurrency int // } // type want struct { -// want uint32 -// want1 bool +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, uint32, bool) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got uint32, got1 bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: 
\"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) -// } -// if !reflect.DeepEqual(got1, w.want1) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got1, w.want1) +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } // return nil // } @@ -5474,6 +5475,8 @@ package service // name: "test_case_1", // args: args { // uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -5531,6 +5534,8 @@ package service // name: "test_case_2", // args: args { // uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -5637,15 +5642,18 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got, got1 := f.Exists(test.args.uuid) -// if err := checkFunc(test.want, got, got1); err != nil { +// err := f.delete(test.args.uuid, test.args.t, test.args.validation) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_IsIndexing(t *testing.T) { +// func Test_faiss_Exists(t *testing.T) { +// type args struct { +// uuid string +// } // type fields struct { // core core.Faiss // eg errgroup.Group @@ -5685,19 +5693,24 @@ package service // kvsdbConcurrency int // } // type want struct { -// want bool +// wantOid uint32 +// wantOk bool // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, bool) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, uint32, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotOid uint32, gotOk bool) error { +// if !reflect.DeepEqual(gotOid, w.wantOid) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -5706,6 +5719,9 @@ package service // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -5746,10 +5762,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -5760,6 +5776,9 @@ package service // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -5800,10 +5819,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -5817,10 +5836,10 @@ package service // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // 
if test.checkFunc == nil { @@ -5865,15 +5884,18 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.IsIndexing() -// if err := checkFunc(test.want, got); err != nil { +// gotOid, gotOk := f.Exists(test.args.uuid) +// if err := checkFunc(test.want, gotOid, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_IsSaving(t *testing.T) { +// func Test_faiss_GetObject(t *testing.T) { +// type args struct { +// uuid string +// } // type fields struct { // core core.Faiss // eg errgroup.Group @@ -5913,19 +5935,28 @@ package service // kvsdbConcurrency int // } // type want struct { -// want bool +// wantVec []float32 +// wantTimestamp int64 +// err error // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, bool) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, []float32, int64, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) // } // return nil // } @@ -5934,6 +5965,9 @@ package service // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -5974,10 +6008,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -5988,6 +6022,9 @@ package service // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -6028,10 +6065,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -6045,10 +6082,10 @@ package service // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -6093,15 +6130,15 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.IsSaving() -// if err := checkFunc(test.want, got); err != nil { +// gotVec, gotTimestamp, err := f.GetObject(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTimestamp, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_NumberOfCreateIndexExecution(t *testing.T) { +// func 
Test_faiss_IsIndexing(t *testing.T) { // type fields struct { // core core.Faiss // eg errgroup.Group @@ -6141,17 +6178,17 @@ package service // kvsdbConcurrency int // } // type want struct { -// want uint64 +// want bool // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, uint64) error +// checkFunc func(want, bool) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { +// defaultCheckFunc := func(w want, got bool) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -6321,7 +6358,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.NumberOfCreateIndexExecution() +// got := f.IsIndexing() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -6329,7 +6366,7 @@ package service // } // } // -// func Test_faiss_NumberOfProactiveGCExecution(t *testing.T) { +// func Test_faiss_IsSaving(t *testing.T) { // type fields struct { // core core.Faiss // eg errgroup.Group @@ -6369,17 +6406,17 @@ package service // kvsdbConcurrency int // } // type want struct { -// want uint64 +// want bool // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, uint64) error +// checkFunc func(want, bool) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { +// defaultCheckFunc := func(w want, got bool) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -6549,7 +6586,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.NumberOfProactiveGCExecution() +// got := f.IsSaving() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -6557,7 +6594,10 @@ package service // } // } // -// func Test_faiss_gc(t *testing.T) { +// func Test_faiss_UUIDs(t *testing.T) { +// type args struct { +// ctx context.Context +// } // type fields struct { // core core.Faiss // eg errgroup.Group @@ -6596,16 +6636,22 @@ package service // idelay time.Duration // kvsdbConcurrency int // } -// type want struct{} +// type want struct { +// wantUuids []string +// } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, []string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotUuids []string) error { +// if !reflect.DeepEqual(gotUuids, w.wantUuids) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotUuids, w.wantUuids) +// } // return nil // } // tests := []test{ @@ -6613,6 +6659,9 @@ package service // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -6653,10 +6702,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -6667,6 +6716,9 @@ package service // func() test { // return test { // name: "test_case_2", +// args: 
args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -6707,10 +6759,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -6724,10 +6776,10 @@ package service // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -6772,15 +6824,15 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// f.gc() -// if err := checkFunc(test.want); err != nil { +// gotUuids := f.UUIDs(test.args.ctx) +// if err := checkFunc(test.want, gotUuids); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_Len(t *testing.T) { +// func Test_faiss_NumberOfCreateIndexExecution(t *testing.T) { // type fields struct { // core core.Faiss // eg errgroup.Group @@ -7000,7 +7052,686 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.Len() +// got := f.NumberOfCreateIndexExecution() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_faiss_NumberOfProactiveGCExecution(t *testing.T) { +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct { +// want uint64 +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, uint64) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, 
+// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// got := f.NumberOfProactiveGCExecution() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_faiss_gc(t *testing.T) { +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct{} +// type test struct { +// 
name string +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// f.gc() 
+// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_faiss_Len(t *testing.T) { +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct { +// want uint64 +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, uint64) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// 
kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// got := f.Len() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -8158,6 +8889,242 @@ package service // } // } // +// func Test_faiss_ListObjectFunc(t *testing.T) { +// type args struct { +// ctx context.Context +// fn func(uuid string, oid uint32, ts int64) bool +// } +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// fn:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// fn:nil, +// }, 
+// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// f.ListObjectFunc(test.args.ctx, test.args.fn) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_faiss_toSearchResponse(t *testing.T) { // type args struct { // sr []algorithm.SearchResult diff --git a/pkg/agent/core/ngt/handler/grpc/object_test.go b/pkg/agent/core/ngt/handler/grpc/object_test.go index 39b5521bbb..6ab2f52a9d 100644 --- a/pkg/agent/core/ngt/handler/grpc/object_test.go +++ b/pkg/agent/core/ngt/handler/grpc/object_test.go @@ -1300,7 +1300,7 @@ func Test_server_StreamListObject(t *testing.T) { // Call the method under test err = s.StreamListObject(&payload.Object_List_Request{}, &stream) - // Check the errros are joined and its a gRPC error + // Check the errors are joined and its a gRPC error require.ErrorContains(t, err, "foo") require.ErrorContains(t, err, "bar") _, ok := status.FromError(err) diff --git a/pkg/agent/core/ngt/handler/grpc/update.go b/pkg/agent/core/ngt/handler/grpc/update.go index b650c15e83..04508938e2 100644 --- a/pkg/agent/core/ngt/handler/grpc/update.go +++ b/pkg/agent/core/ngt/handler/grpc/update.go @@ 
-372,3 +372,100 @@ func (s *server) MultiUpdate( } return s.newLocations(uuids...), nil } + +func (s *server) UpdateTimestamp( + ctx context.Context, req *payload.Update_TimestampRequest, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + uuid := req.GetId() + reqInfo := &errdetails.RequestInfo{ + RequestId: uuid, + ServingData: errdetails.Serialize(req), + } + resInfo := &errdetails.ResourceInfo{ + ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateTimestampRPCName + "." + vald.GetObjectRPCName, + ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), + } + if len(uuid) == 0 { + err = errors.ErrInvalidUUID(uuid) + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid uuid", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "invalid id", + Description: err.Error(), + }, + }, + }) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + ts := req.GetTimestamp() + if !req.GetForce() && ts < 0 { + err = errors.ErrInvalidTimestamp(ts) + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid vector argument", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "timestamp", + Description: err.Error(), + }, + }, + }, info.Get()) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) 
+ span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + err = s.ngt.UpdateTimestamp(uuid, ts, req.GetForce()) + if err != nil { + var attrs []attribute.KeyValue + if errors.Is(err, errors.ErrFlushingIsInProgress) { + err = status.WrapWithAborted(vald.UpdateTimestampRPCName+" API aborted to process update request due to flushing indices is in progress", err, reqInfo, resInfo) + log.Warn(err) + attrs = trace.StatusCodeAborted(err.Error()) + } else if errors.Is(err, errors.ErrObjectNotFound(nil, uuid)) { + err = status.WrapWithNotFound(fmt.Sprintf(vald.UpdateTimestampRPCName+" API uuid %s's data not found", uuid), err, reqInfo, resInfo) + log.Warn(err) + attrs = trace.StatusCodeNotFound(err.Error()) + } else if errors.Is(err, errors.ErrZeroTimestamp) || errors.Is(err, errors.ErrUUIDNotFound(0)) { + err = status.WrapWithInvalidArgument(fmt.Sprintf(vald.UpdateTimestampRPCName+" API invalid argument for uuid \"%s\" detected", uuid), err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "uuid, timestamp", + Description: err.Error(), + }, + }, + }) + log.Warn(err) + attrs = trace.StatusCodeInvalidArgument(err.Error()) + } else if errors.Is(err, errors.ErrNewerTimestampObjectAlreadyExists(uuid, ts)) { + err = status.WrapWithAlreadyExists(fmt.Sprintf(vald.UpdateTimestampRPCName+" API uuid %s's newer timestamp already exists", uuid), err, reqInfo, resInfo) + log.Warn(err) + attrs = trace.StatusCodeAlreadyExists(err.Error()) + } else { + err = status.WrapWithInternal(vald.UpdateTimestampRPCName+" API failed", err, reqInfo, resInfo, info.Get()) + log.Error(err) + attrs = trace.StatusCodeInternal(err.Error()) + } + if span != nil { + span.RecordError(err) + span.SetAttributes(attrs...) 
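+			// The branches above map service errors to gRPC statuses: Aborted while
+			// flushing is in progress, NotFound for an unknown uuid, InvalidArgument
+			// for a zero timestamp or missing uuid, AlreadyExists when a newer
+			// timestamp is already stored, and Internal for any other failure; the
+			// mapped status is recorded on the span before the wrapped error is returned.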
+ span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + return s.newLocation(uuid), nil +} diff --git a/pkg/agent/core/ngt/handler/grpc/update_test.go b/pkg/agent/core/ngt/handler/grpc/update_test.go index c2ef660f30..38bf394e4a 100644 --- a/pkg/agent/core/ngt/handler/grpc/update_test.go +++ b/pkg/agent/core/ngt/handler/grpc/update_test.go @@ -1117,3 +1117,132 @@ func Test_server_Update(t *testing.T) { // }) // } // } +// +// func Test_server_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// req *payload.Update_TimestampRequest +// } +// type fields struct { +// name string +// ip string +// ngt service.NGT +// eg errgroup.Group +// streamConcurrency int +// UnimplementedAgentServer agent.UnimplementedAgentServer +// UnimplementedValdServer vald.UnimplementedValdServer +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// name:"", +// ip:"", +// ngt:nil, +// eg:nil, +// streamConcurrency:0, +// UnimplementedAgentServer:nil, +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// name:"", +// ip:"", +// ngt:nil, +// eg:nil, +// streamConcurrency:0, +// UnimplementedAgentServer:nil, +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// s := &server{ +// name: test.fields.name, +// ip: test.fields.ip, +// ngt: test.fields.ngt, +// eg: test.fields.eg, +// streamConcurrency: test.fields.streamConcurrency, +// UnimplementedAgentServer: test.fields.UnimplementedAgentServer, +// UnimplementedValdServer: test.fields.UnimplementedValdServer, +// } +// +// gotRes, err := s.UpdateTimestamp(test.args.ctx, test.args.req) +// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/agent/core/ngt/service/ngt.go 
b/pkg/agent/core/ngt/service/ngt.go index 09bf8577f5..8a16fe978f 100644 --- a/pkg/agent/core/ngt/service/ngt.go +++ b/pkg/agent/core/ngt/service/ngt.go @@ -48,125 +48,129 @@ import ( "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/pkg/agent/internal/kvs" + "github.com/vdaas/vald/pkg/agent/internal/memstore" "github.com/vdaas/vald/pkg/agent/internal/metadata" "github.com/vdaas/vald/pkg/agent/internal/vqueue" ) -type contextSaveIndexTimeKey string - -type NGT interface { - Start(ctx context.Context) <-chan error - Search(ctx context.Context, vec []float32, size uint32, epsilon, radius float32) (*payload.Search_Response, error) - SearchByID(ctx context.Context, uuid string, size uint32, epsilon, radius float32) ([]float32, *payload.Search_Response, error) - LinearSearch(ctx context.Context, vec []float32, size uint32) (*payload.Search_Response, error) - LinearSearchByID(ctx context.Context, uuid string, size uint32) ([]float32, *payload.Search_Response, error) - Insert(uuid string, vec []float32) (err error) - InsertWithTime(uuid string, vec []float32, t int64) (err error) - InsertMultiple(vecs map[string][]float32) (err error) - InsertMultipleWithTime(vecs map[string][]float32, t int64) (err error) - Update(uuid string, vec []float32) (err error) - UpdateWithTime(uuid string, vec []float32, t int64) (err error) - UpdateMultiple(vecs map[string][]float32) (err error) - UpdateMultipleWithTime(vecs map[string][]float32, t int64) (err error) - Delete(uuid string) (err error) - DeleteWithTime(uuid string, t int64) (err error) - DeleteMultiple(uuids ...string) (err error) - DeleteMultipleWithTime(uuids []string, t int64) (err error) - RegenerateIndexes(ctx context.Context) (err error) - GetObject(uuid string) (vec []float32, timestamp int64, err error) - ListObjectFunc(ctx context.Context, f func(uuid string, oid uint32, timestamp int64) bool) - CreateIndex(ctx context.Context, poolSize uint32) (err error) - SaveIndex(ctx context.Context) (err error) - Exists(string) (uint32, bool) - CreateAndSaveIndex(ctx context.Context, poolSize uint32) (err error) - IsIndexing() bool - IsFlushing() bool - IsSaving() bool - Len() uint64 - NumberOfCreateIndexExecution() uint64 - NumberOfProactiveGCExecution() uint64 - UUIDs(context.Context) (uuids []string) - DeleteVQueueBufferLen() uint64 - InsertVQueueBufferLen() uint64 - GetDimensionSize() int - BrokenIndexCount() uint64 - IndexStatistics() (*payload.Info_Index_Statistics, error) - IsStatisticsEnabled() bool - IndexProperty() (*payload.Info_Index_Property, error) - Close(ctx context.Context) error -} - -type ngt struct { - // instances - core core.NGT - eg errgroup.Group - kvs kvs.BidiMap - fmu sync.Mutex - fmap map[string]int64 // failure map for index - vq vqueue.Queue - - // statuses - indexing atomic.Value - flushing atomic.Bool - saving atomic.Value - cimu sync.Mutex // create index mutex - lastNocie uint64 // last number of create index execution this value prevent unnecessary saveindex. 
- - // counters - nocie uint64 // number of create index execution - nogce uint64 // number of proactive GC execution - wfci uint64 // wait for create indexing - nobic uint64 // number of broken index count - nopvq atomic.Uint64 // number of processed vq number - - // parameters - cfg *config.NGT - opts []Option - - // configurations - inMem bool // in-memory mode - dim int // dimension size - alen int // auto indexing length - - lim time.Duration // auto indexing time limit - dur time.Duration // auto indexing check duration - sdur time.Duration // auto save index check duration - - minLit time.Duration // minimum load index timeout - maxLit time.Duration // maximum load index timeout - litFactor time.Duration // load index timeout factor - - enableProactiveGC bool // if this value is true, agent component will purge GC memory more proactive - enableCopyOnWrite bool // if this value is true, agent component will write backup file using Copy on Write and saves old files to the old directory - - podName string - podNamespace string - path string // index path - smu sync.Mutex // save index lock - tmpPath atomic.Value // temporary index path for Copy on Write - oldPath string // old volume path - basePath string // index base directory for CoW - brokenPath string // backup broken index path - cowmu sync.Mutex // copy on write move lock - - poolSize uint32 // default pool size - radius float32 // default radius - epsilon float32 // default epsilon - - idelay time.Duration // initial delay duration - dcd bool // disable commit daemon - - kvsdbConcurrency int // kvsdb concurrency - historyLimit int // the maximum generation number of broken index backup - - isReadReplica bool - enableExportIndexInfo bool - exportIndexInfoDuration time.Duration - patcher client.Patcher - - enableStatistics bool - statisticsCache atomic.Pointer[payload.Info_Index_Statistics] -} +type ( + NGT interface { + Start(ctx context.Context) <-chan error + Search(ctx context.Context, vec []float32, size uint32, epsilon, radius float32) (*payload.Search_Response, error) + SearchByID(ctx context.Context, uuid string, size uint32, epsilon, radius float32) ([]float32, *payload.Search_Response, error) + LinearSearch(ctx context.Context, vec []float32, size uint32) (*payload.Search_Response, error) + LinearSearchByID(ctx context.Context, uuid string, size uint32) ([]float32, *payload.Search_Response, error) + Insert(uuid string, vec []float32) (err error) + InsertWithTime(uuid string, vec []float32, t int64) (err error) + InsertMultiple(vecs map[string][]float32) (err error) + InsertMultipleWithTime(vecs map[string][]float32, t int64) (err error) + Update(uuid string, vec []float32) (err error) + UpdateWithTime(uuid string, vec []float32, t int64) (err error) + UpdateMultiple(vecs map[string][]float32) (err error) + UpdateMultipleWithTime(vecs map[string][]float32, t int64) (err error) + UpdateTimestamp(uuid string, ts int64, force bool) (err error) + Delete(uuid string) (err error) + DeleteWithTime(uuid string, t int64) (err error) + DeleteMultiple(uuids ...string) (err error) + DeleteMultipleWithTime(uuids []string, t int64) (err error) + RegenerateIndexes(ctx context.Context) (err error) + GetObject(uuid string) (vec []float32, timestamp int64, err error) + ListObjectFunc(ctx context.Context, f func(uuid string, oid uint32, timestamp int64) bool) + Exists(uuid string) (uint32, bool) + CreateIndex(ctx context.Context, poolSize uint32) (err error) + SaveIndex(ctx context.Context) (err error) + CreateAndSaveIndex(ctx 
context.Context, poolSize uint32) (err error) + IsIndexing() bool + IsFlushing() bool + IsSaving() bool + Len() uint64 + NumberOfCreateIndexExecution() uint64 + NumberOfProactiveGCExecution() uint64 + UUIDs(context.Context) (uuids []string) + InsertVQueueBufferLen() uint64 + DeleteVQueueBufferLen() uint64 + GetDimensionSize() int + BrokenIndexCount() uint64 + IndexStatistics() (*payload.Info_Index_Statistics, error) + IsStatisticsEnabled() bool + IndexProperty() (*payload.Info_Index_Property, error) + Close(ctx context.Context) error + } + + ngt struct { + // instances + core core.NGT + eg errgroup.Group + kvs kvs.BidiMap + fmu sync.Mutex + fmap map[string]int64 // failure map for index + vq vqueue.Queue + + // statuses + indexing atomic.Value + flushing atomic.Bool + saving atomic.Value + cimu sync.Mutex // create index mutex + lastNocie uint64 // last number of create index execution this value prevent unnecessary saveindex. + + // counters + nocie uint64 // number of create index execution + nogce uint64 // number of proactive GC execution + wfci uint64 // wait for create indexing + nobic uint64 // number of broken index count + nopvq atomic.Uint64 // number of processed vq number + + // parameters + cfg *config.NGT + opts []Option + + // configurations + inMem bool // in-memory mode + dim int // dimension size + alen int // auto indexing length + + lim time.Duration // auto indexing time limit + dur time.Duration // auto indexing check duration + sdur time.Duration // auto save index check duration + + minLit time.Duration // minimum load index timeout + maxLit time.Duration // maximum load index timeout + litFactor time.Duration // load index timeout factor + + enableProactiveGC bool // if this value is true, agent component will purge GC memory more proactive + enableCopyOnWrite bool // if this value is true, agent component will write backup file using Copy on Write and saves old files to the old directory + + podName string + podNamespace string + path string // index path + smu sync.Mutex // save index lock + tmpPath atomic.Value // temporary index path for Copy on Write + oldPath string // old volume path + basePath string // index base directory for CoW + brokenPath string // backup broken index path + cowmu sync.Mutex // copy on write move lock + + poolSize uint32 // default pool size + radius float32 // default radius + epsilon float32 // default epsilon + + idelay time.Duration // initial delay duration + dcd bool // disable commit daemon + + kvsdbConcurrency int // kvsdb concurrency + historyLimit int // the maximum generation number of broken index backup + + isReadReplica bool + enableExportIndexInfo bool + exportIndexInfoDuration time.Duration + patcher client.Patcher + + enableStatistics bool + statisticsCache atomic.Pointer[payload.Info_Index_Statistics] + } + + contextSaveIndexTimeKey string +) const ( kvsFileName = "ngt-meta.kvsdb" @@ -551,7 +555,7 @@ func (n *ngt) load(ctx context.Context, path string, opts ...core.Option) (err e // backupBroken backup index at originPath into brokenDir. // The name of the directory will be timestamp(UnixNano). -// If it exeeds the limit, backupBroken removes the oldest backup directory. +// If it exceeds the limit, backupBroken removes the oldest backup directory. 
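+// A non-positive historyLimit disables this backup entirely (see the early return below).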
func (n *ngt) backupBroken(ctx context.Context) error { if n.historyLimit <= 0 { return nil @@ -1118,7 +1122,7 @@ func (n *ngt) UpdateWithTime(uuid string, vec []float32, t int64) (err error) { } func (n *ngt) update(uuid string, vec []float32, t int64) (err error) { - if err = n.readyForUpdate(uuid, vec); err != nil { + if err = n.readyForUpdate(uuid, vec, t); err != nil { return err } err = n.delete(uuid, t, true) // `true` is to return NotFound error with non-existent ID @@ -1149,7 +1153,7 @@ func (n *ngt) UpdateMultipleWithTime(vecs map[string][]float32, t int64) (err er func (n *ngt) updateMultiple(vecs map[string][]float32, t int64) (err error) { uuids := make([]string, 0, len(vecs)) for uuid, vec := range vecs { - if err = n.readyForUpdate(uuid, vec); err != nil { + if err = n.readyForUpdate(uuid, vec, t); err != nil { delete(vecs, uuid) } else { uuids = append(uuids, uuid) @@ -1163,6 +1167,15 @@ func (n *ngt) updateMultiple(vecs map[string][]float32, t int64) (err error) { return n.insertMultiple(vecs, t, false) } +func (n *ngt) UpdateTimestamp(uuid string, ts int64, force bool) (err error) { + if n.IsFlushing() { + return errors.ErrFlushingIsInProgress + } + return memstore.UpdateTimestamp(n.kvs, n.vq, uuid, ts, force, func(oid uint32) ([]float32, error) { + return n.core.GetVector(uint(oid)) + }) +} + func (n *ngt) Delete(uuid string) (err error) { if n.IsFlushing() { return errors.ErrFlushingIsInProgress @@ -1187,7 +1200,10 @@ func (n *ngt) delete(uuid string, t int64, validation bool) (err error) { } if validation { _, _, ok := n.kvs.Get(uuid) - if !ok && !n.vq.IVExists(uuid) { + if !ok && func() (ok bool) { + _, ok = n.vq.IVExists(uuid) + return !ok + }() { return errors.ErrObjectIDNotFound(uuid) } } @@ -1349,7 +1365,7 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { } log.Infof("create index operation started, uncommitted indexes = %d", ic) log.Debug("create index delete phase started") - // vqProcessedCnt is a tempral counter to store the number of processed vqueue items. + // vqProcessedCnt is a temporary counter to store the number of processed vqueue items. // This will be added to nopvq after CreateIndex operation succeeds. var vqProcessedCnt uint64 n.vq.RangePopDelete(ctx, now, func(uuid string) bool { @@ -1360,7 +1376,7 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { return true } log.Debugf("start remove operation for ngt index id: %s, oid: %d", uuid, oid) - if err := n.core.Remove(uint(oid)); err != nil { + if err = n.core.Remove(uint(oid)); err != nil { log.Errorf("failed to remove oid: %d from ngt index. error: %v", oid, err) n.fmu.Lock() n.fmap[uuid] = int64(oid) @@ -1377,7 +1393,8 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { var icnt uint32 n.vq.RangePopInsert(ctx, now, func(uuid string, vector []float32, timestamp int64) bool { log.Debugf("start insert operation for ngt index id: %s", uuid) - oid, err := n.core.Insert(vector) + var oid uint + oid, err = n.core.Insert(vector) if err != nil { log.Warnf("failed to insert vector uuid: %s vec: %v to ngt index. 
error: %v", uuid, vector, err) if errors.Is(err, errors.ErrIncompatibleDimensionSize(len(vector), n.dim)) { @@ -1578,7 +1595,7 @@ func (n *ngt) saveIndex(ctx context.Context) (err error) { beforeNopvq := n.nopvq.Load() defer n.gc() - // since defering here, atomic operations are guaranteed in this scope + // since deferring here, atomic operations are guaranteed in this scope defer n.saving.Store(false) log.Debug("cleanup invalid index started") @@ -1586,7 +1603,7 @@ func (n *ngt) saveIndex(ctx context.Context) (err error) { log.Debug("cleanup invalid index finished") eg, ectx := errgroup.New(ctx) - // we want to ensure the acutal kvs size between kvsdb and metadata, + // we want to ensure the actual kvs size between kvsdb and metadata, // so we create this counter to count the actual kvs size instead of using kvs.Len() var ( kvsLen uint64 @@ -1838,59 +1855,23 @@ func (n *ngt) Exists(uuid string) (oid uint32, ok bool) { uuid, errors.ErrFlushingIsInProgress) return 0, false } - ok = n.vq.IVExists(uuid) - if !ok { - oid, _, ok = n.kvs.Get(uuid) - if !ok { - log.Debugf("Exists\tuuid: %s's data not found in kvsdb and insert vqueue\terror: %v", uuid, errors.ErrObjectIDNotFound(uuid)) - return 0, false - } - if n.vq.DVExists(uuid) { - log.Debugf( - "Exists\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. the object will be delete soon\terror: %v", - uuid, - errors.ErrObjectIDNotFound(uuid), - ) - return 0, false - } - } - return oid, ok + return memstore.Exists(n.kvs, n.vq, uuid) } func (n *ngt) GetObject(uuid string) (vec []float32, timestamp int64, err error) { - vec, ts, exists := n.vq.GetVector(uuid) - if exists { - return vec, ts, nil - } - - oid, ts, ok := n.kvs.Get(uuid) - if !ok { - log.Debugf("GetObject\tuuid: %s's data not found in kvsdb and insert vqueue", uuid) - return nil, 0, errors.ErrObjectIDNotFound(uuid) - } - - if n.vq.DVExists(uuid) { - log.Debugf("GetObject\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. 
the object will be delete soon", uuid) - return nil, 0, errors.ErrObjectIDNotFound(uuid) - } - - vec, err = n.core.GetVector(uint(oid)) - if err != nil { - log.Debugf("GetObject\tuuid: %s oid: %d's vector not found in ngt index", uuid, oid) - return nil, 0, errors.ErrObjectNotFound(err, uuid) - } - - return vec, ts, nil + return memstore.GetObject(n.kvs, n.vq, uuid, func(oid uint32) ([]float32, error) { + return n.core.GetVector(uint(oid)) + }) } -func (n *ngt) readyForUpdate(uuid string, vec []float32) (err error) { +func (n *ngt) readyForUpdate(uuid string, vec []float32, ts int64) (err error) { if len(uuid) == 0 { return errors.ErrUUIDNotFound(0) } if len(vec) != n.GetDimensionSize() { return errors.ErrInvalidDimensionSize(len(vec), n.GetDimensionSize()) } - ovec, _, err := n.GetObject(uuid) + ovec, ots, err := n.GetObject(uuid) // if error (GetObject cannot find vector) return error if err != nil { return err @@ -1899,6 +1880,14 @@ func (n *ngt) readyForUpdate(uuid string, vec []float32) (err error) { if len(vec) != len(ovec) || conv.F32stos(vec) != conv.F32stos(ovec) { return nil } + + if ots < ts { + err = n.UpdateTimestamp(uuid, ts, false) + if err != nil { + return err + } + } + // if no difference exists (same vector already exists) return error for skip update return errors.ErrUUIDAlreadyExists(uuid) } @@ -1918,15 +1907,7 @@ func (n *ngt) IsFlushing() bool { } func (n *ngt) UUIDs(ctx context.Context) (uuids []string) { - uuids = make([]string, 0, n.kvs.Len()) - var mu sync.Mutex - n.kvs.Range(ctx, func(uuid string, oid uint32, _ int64) bool { - mu.Lock() - uuids = append(uuids, uuid) - mu.Unlock() - return true - }) - return uuids + return memstore.UUIDs(ctx, n.kvs, n.vq) } func (n *ngt) NumberOfCreateIndexExecution() uint64 { @@ -2026,25 +2007,7 @@ func (n *ngt) BrokenIndexCount() uint64 { // Use this function for performing something on each object with caring about the memory usage. // If the vector exists in the vqueue, this vector is not indexed so the oid(object ID) is processed as 0. 
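+// The iteration itself is now delegated to memstore.ListObjectFunc, which is assumed
+// to keep the previous inline behavior: range over the insert vqueue first (oid 0),
+// then kvsdb, skipping kvsdb entries that have a newer uncommitted vqueue entry.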
func (n *ngt) ListObjectFunc(ctx context.Context, f func(uuid string, oid uint32, ts int64) bool) { - dup := make(map[string]bool) - n.vq.Range(ctx, func(uuid string, vec []float32, ts int64) (ok bool) { - ok = f(uuid, 0, ts) - if !ok { - return false - } - var kts int64 - _, kts, ok = n.kvs.Get(uuid) - if ok && ts > kts { - dup[uuid] = true - } - return true - }) - n.kvs.Range(ctx, func(uuid string, oid uint32, ts int64) (ok bool) { - if dup[uuid] { - return true - } - return f(uuid, oid, ts) - }) + memstore.ListObjectFunc(ctx, n.kvs, n.vq, f) } func (n *ngt) IndexStatistics() (stats *payload.Info_Index_Statistics, err error) { diff --git a/pkg/agent/core/ngt/service/ngt_test.go b/pkg/agent/core/ngt/service/ngt_test.go index 71eb5d3b2e..8b2b1cbf79 100644 --- a/pkg/agent/core/ngt/service/ngt_test.go +++ b/pkg/agent/core/ngt/service/ngt_test.go @@ -36,6 +36,7 @@ import ( core "github.com/vdaas/vald/internal/core/algorithm/ngt" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/file" + "github.com/vdaas/vald/internal/k8s/vald" kvald "github.com/vdaas/vald/internal/k8s/vald" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" @@ -650,7 +651,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns false when it's an initaial state", + name: "returns false when it's an initial state", args: args{ path: tmpDir, }, @@ -707,7 +708,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns true when mets.IsInvalid is true", + name: "returns true when meta.IsInvalid is true", args: args{ path: tmpDir, }, @@ -738,7 +739,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns true when mets.IsInvalid is true", + name: "returns true when meta.IsInvalid is true", args: args{ path: tmpDir, }, @@ -1370,7 +1371,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { return } type args struct { - idxes []index + indices []index poolSize uint32 bulkSize int } @@ -1444,7 +1445,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { return test{ name: fmt.Sprintf("insert & upsert %d random and 11 digits added to each vector element", count), args: args{ - idxes: createRandomData(count, &createRandomDataConfig{ + indices: createRandomData(count, &createRandomDataConfig{ additionaldigits: 11, }), poolSize: uint32(count / 10), @@ -1490,7 +1491,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { if err != nil { tt.Errorf("failed to init ngt service, error = %v", err) } - for _, idx := range test.args.idxes { + for _, idx := range test.args.indices { err = n.Insert(idx.uuid, idx.vec) if err := checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) @@ -1508,7 +1509,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { idx := i eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-1", idx) - for _, idx := range test.args.idxes[:len(test.args.idxes)/3] { + for _, idx := range test.args.indices[:len(test.args.indices)/3] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1518,7 +1519,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-2", idx) - for _, idx := range test.args.idxes[len(test.args.idxes)/3 : 2*len(test.args.idxes)/3] { + for _, idx := range test.args.indices[len(test.args.indices)/3 : 
2*len(test.args.indices)/3] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1528,7 +1529,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-3", idx) - for _, idx := range test.args.idxes[2*len(test.args.idxes)/3:] { + for _, idx := range test.args.indices[2*len(test.args.indices)/3:] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1569,9 +1570,9 @@ func Test_ngt_E2E(t *testing.T) { beforeFunc func(args) afterFunc func(args) } - multiUpsertRequestGenFunc := func(idxes []index, chunk int) (res []*payload.Upsert_MultiRequest) { + multiUpsertRequestGenFunc := func(indices []index, chunk int) (res []*payload.Upsert_MultiRequest) { reqs := make([]*payload.Upsert_Request, 0, chunk) - for i := 0; i < len(idxes); i++ { + for i := 0; i < len(indices); i++ { if len(reqs) == chunk-1 { res = append(res, &payload.Upsert_MultiRequest{ Requests: reqs, @@ -1580,8 +1581,8 @@ func Test_ngt_E2E(t *testing.T) { } else { reqs = append(reqs, &payload.Upsert_Request{ Vector: &payload.Object_Vector{ - Id: idxes[i].uuid, - Vector: idxes[i].vec, + Id: indices[i].uuid, + Vector: indices[i].vec, }, Config: &payload.Upsert_Config{ SkipStrictExistCheck: true, @@ -8484,9 +8485,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_Delete(t *testing.T) { +// func Test_ngt_UpdateTimestamp(t *testing.T) { // type args struct { -// uuid string +// uuid string +// ts int64 +// force bool // } // type fields struct { // core core.NGT @@ -8562,6 +8565,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuid:"", +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -8630,6 +8635,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuid:"", +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -8758,7 +8765,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.Delete(test.args.uuid) +// err := n.UpdateTimestamp(test.args.uuid, test.args.ts, test.args.force) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -8766,10 +8773,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteWithTime(t *testing.T) { +// func Test_ngt_Delete(t *testing.T) { // type args struct { // uuid string -// t int64 // } // type fields struct { // core core.NGT @@ -8845,7 +8851,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuid:"", -// t:0, // }, // fields: fields { // core:nil, @@ -8914,7 +8919,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuid:"", -// t:0, // }, // fields: fields { // core:nil, @@ -9043,7 +9047,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.DeleteWithTime(test.args.uuid, test.args.t) +// err := n.Delete(test.args.uuid) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9051,11 +9055,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_delete(t *testing.T) { +// func Test_ngt_DeleteWithTime(t *testing.T) { // type args 
struct { -// uuid string -// t int64 -// validation bool +// uuid string +// t int64 // } // type fields struct { // core core.NGT @@ -9132,7 +9135,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // args: args { // uuid:"", // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -9202,7 +9204,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // args: args { // uuid:"", // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -9331,7 +9332,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.delete(test.args.uuid, test.args.t, test.args.validation) +// err := n.DeleteWithTime(test.args.uuid, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9339,9 +9340,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteMultiple(t *testing.T) { +// func Test_ngt_delete(t *testing.T) { // type args struct { -// uuids []string +// uuid string +// t int64 +// validation bool // } // type fields struct { // core core.NGT @@ -9416,7 +9419,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuids:nil, +// uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -9484,7 +9489,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuids:nil, +// uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -9613,7 +9620,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.DeleteMultiple(test.args.uuids...) +// err := n.delete(test.args.uuid, test.args.t, test.args.validation) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9621,10 +9628,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteMultipleWithTime(t *testing.T) { +// func Test_ngt_DeleteMultiple(t *testing.T) { // type args struct { // uuids []string -// t int64 // } // type fields struct { // core core.NGT @@ -9700,7 +9706,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuids:nil, -// t:0, // }, // fields: fields { // core:nil, @@ -9769,7 +9774,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuids:nil, -// t:0, // }, // fields: fields { // core:nil, @@ -9898,7 +9902,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.DeleteMultipleWithTime(test.args.uuids, test.args.t) +// err := n.DeleteMultiple(test.args.uuids...) 
// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9906,11 +9910,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_deleteMultiple(t *testing.T) { +// func Test_ngt_DeleteMultipleWithTime(t *testing.T) { // type args struct { -// uuids []string -// now int64 -// validation bool +// uuids []string +// t int64 // } // type fields struct { // core core.NGT @@ -9986,8 +9989,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuids:nil, -// now:0, -// validation:false, +// t:0, // }, // fields: fields { // core:nil, @@ -10056,8 +10058,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuids:nil, -// now:0, -// validation:false, +// t:0, // }, // fields: fields { // core:nil, @@ -10186,7 +10187,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.deleteMultiple(test.args.uuids, test.args.now, test.args.validation) +// err := n.DeleteMultipleWithTime(test.args.uuids, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -10194,9 +10195,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_RegenerateIndexes(t *testing.T) { +// func Test_ngt_deleteMultiple(t *testing.T) { // type args struct { -// ctx context.Context +// uuids []string +// now int64 +// validation bool // } // type fields struct { // core core.NGT @@ -10271,7 +10274,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// ctx:nil, +// uuids:nil, +// now:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -10339,7 +10344,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// ctx:nil, +// uuids:nil, +// now:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -10468,7 +10475,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.RegenerateIndexes(test.args.ctx) +// err := n.deleteMultiple(test.args.uuids, test.args.now, test.args.validation) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -10476,7 +10483,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_removeInvalidIndex(t *testing.T) { +// func Test_ngt_RegenerateIndexes(t *testing.T) { // type args struct { // ctx context.Context // } @@ -10529,17 +10536,22 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct{} +// type want struct { +// err error +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } // return nil // } // tests := []test{ @@ -10745,15 +10757,15 @@ func 
createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// n.removeInvalidIndex(test.args.ctx) -// if err := checkFunc(test.want); err != nil { +// err := n.RegenerateIndexes(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_saveIndex(t *testing.T) { +// func Test_ngt_removeInvalidIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -10806,22 +10818,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct { -// err error -// } +// type want struct{} // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -11027,18 +11034,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.saveIndex(test.args.ctx) -// if err := checkFunc(test.want, err); err != nil { +// n.removeInvalidIndex(test.args.ctx) +// if err := checkFunc(test.want); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_CreateAndSaveIndex(t *testing.T) { +// func Test_ngt_saveIndex(t *testing.T) { // type args struct { -// ctx context.Context -// poolSize uint32 +// ctx context.Context // } // type fields struct { // core core.NGT @@ -11114,7 +11120,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, -// poolSize:0, // }, // fields: fields { // core:nil, @@ -11183,7 +11188,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, -// poolSize:0, // }, // fields: fields { // core:nil, @@ -11312,7 +11316,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.CreateAndSaveIndex(test.args.ctx, test.args.poolSize) +// err := n.saveIndex(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -11320,9 +11324,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_moveAndSwitchSavedData(t *testing.T) { +// func Test_ngt_CreateAndSaveIndex(t *testing.T) { // type args struct { -// ctx context.Context +// ctx context.Context +// poolSize uint32 // } // type fields struct { // core core.NGT @@ -11398,6 +11403,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, +// poolSize:0, // }, // fields: fields { // core:nil, @@ -11466,6 +11472,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, +// poolSize:0, // }, // fields: fields { // core:nil, @@ -11594,7 +11601,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := 
n.moveAndSwitchSavedData(test.args.ctx) +// err := n.CreateAndSaveIndex(test.args.ctx, test.args.poolSize) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -11602,7 +11609,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_mktmp(t *testing.T) { +// func Test_ngt_moveAndSwitchSavedData(t *testing.T) { +// type args struct { +// ctx context.Context +// } // type fields struct { // core core.NGT // eg errgroup.Group @@ -11657,11 +11667,12 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // type test struct { // name string +// args args // fields fields // want want // checkFunc func(want, error) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } // defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { @@ -11674,6 +11685,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -11725,10 +11739,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -11739,6 +11753,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -11790,10 +11807,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -11807,10 +11824,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -11866,7 +11883,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.mktmp() +// err := n.moveAndSwitchSavedData(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -11874,10 +11891,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_Exists(t *testing.T) { -// type args struct { -// uuid string -// } +// func Test_ngt_mktmp(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -11928,24 +11942,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantOid uint32 -// wantOk bool +// err error // } // type test struct { // name string -// args args // fields fields // want want -// checkFunc func(want, uint32, bool) error -// 
beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// checkFunc func(want, error) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, gotOid uint32, gotOk bool) error { -// if !reflect.DeepEqual(gotOid, w.wantOid) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) -// } -// if !reflect.DeepEqual(gotOk, w.wantOk) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } // return nil // } @@ -11954,9 +11963,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// uuid:"", -// }, // fields: fields { // core:nil, // eg:nil, @@ -12008,10 +12014,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -12022,9 +12028,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", -// args: args { -// uuid:"", -// }, // fields: fields { // core:nil, // eg:nil, @@ -12076,10 +12079,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -12093,10 +12096,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -12152,15 +12155,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotOid, gotOk := n.Exists(test.args.uuid) -// if err := checkFunc(test.want, gotOid, gotOk); err != nil { +// err := n.mktmp() +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_GetObject(t *testing.T) { +// func Test_ngt_Exists(t *testing.T) { // type args struct { // uuid string // } @@ -12214,28 +12217,24 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantVec []float32 -// wantTimestamp int64 -// err error +// wantOid uint32 +// wantOk bool // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, []float32, int64, error) error +// checkFunc func(want, uint32, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: 
\"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) -// } -// if !reflect.DeepEqual(gotVec, w.wantVec) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// defaultCheckFunc := func(w want, gotOid uint32, gotOk bool) error { +// if !reflect.DeepEqual(gotOid, w.wantOid) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) // } -// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -12442,18 +12441,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotVec, gotTimestamp, err := n.GetObject(test.args.uuid) -// if err := checkFunc(test.want, gotVec, gotTimestamp, err); err != nil { +// gotOid, gotOk := n.Exists(test.args.uuid) +// if err := checkFunc(test.want, gotOid, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_readyForUpdate(t *testing.T) { +// func Test_ngt_GetObject(t *testing.T) { // type args struct { // uuid string -// vec []float32 // } // type fields struct { // core core.NGT @@ -12505,21 +12503,29 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// err error +// wantVec []float32 +// wantTimestamp int64 +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want, []float32, int64, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { +// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } // return nil // } // tests := []test{ @@ -12529,7 +12535,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuid:"", -// vec:nil, // }, // fields: fields { // core:nil, @@ -12598,7 +12603,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuid:"", -// vec:nil, // }, // fields: fields { // core:nil, @@ -12727,19 +12731,24 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.readyForUpdate(test.args.uuid, test.args.vec) -// if err := checkFunc(test.want, err); err != nil { +// gotVec, gotTimestamp, err := n.GetObject(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTimestamp, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_IsSaving(t *testing.T) { -// type fields struct { -// core core.NGT -// eg errgroup.Group -// kvs kvs.BidiMap +// func Test_ngt_readyForUpdate(t *testing.T) { +// type args struct { +// uuid string +// vec 
[]float32 +// ts int64 +// } +// type fields struct { +// core core.NGT +// eg errgroup.Group +// kvs kvs.BidiMap // fmap map[string]int64 // vq vqueue.Queue // indexing atomic.Value @@ -12786,19 +12795,20 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want bool +// err error // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, bool) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } // return nil // } @@ -12807,6 +12817,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// vec:nil, +// ts:0, +// }, // fields: fields { // core:nil, // eg:nil, @@ -12858,10 +12873,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -12872,6 +12887,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// vec:nil, +// ts:0, +// }, // fields: fields { // core:nil, // eg:nil, @@ -12923,10 +12943,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -12940,10 +12960,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -12999,15 +13019,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsSaving() -// if err := checkFunc(test.want, got); err != nil { +// err := n.readyForUpdate(test.args.uuid, test.args.vec, test.args.ts) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_IsIndexing(t *testing.T) { +// func Test_ngt_IsSaving(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -13271,7 +13291,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsIndexing() +// got := n.IsSaving() // if err := checkFunc(test.want, got); err != nil { // 
tt.Errorf("error = %v", err) // } @@ -13279,7 +13299,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_IsFlushing(t *testing.T) { +// func Test_ngt_IsIndexing(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -13543,7 +13563,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsFlushing() +// got := n.IsIndexing() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -13551,10 +13571,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_UUIDs(t *testing.T) { -// type args struct { -// ctx context.Context -// } +// func Test_ngt_IsFlushing(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -13605,20 +13622,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantUuids []string +// want bool // } // type test struct { // name string -// args args // fields fields // want want -// checkFunc func(want, []string) error -// beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, gotUuids []string) error { -// if !reflect.DeepEqual(gotUuids, w.wantUuids) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotUuids, w.wantUuids) +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } // return nil // } @@ -13627,9 +13643,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// ctx:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -13681,10 +13694,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -13695,9 +13708,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", -// args: args { -// ctx:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -13749,10 +13759,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -13766,10 +13776,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -13825,15 +13835,18 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: 
test.fields.statisticsCache, // } // -// gotUuids := n.UUIDs(test.args.ctx) -// if err := checkFunc(test.want, gotUuids); err != nil { +// got := n.IsFlushing() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_NumberOfCreateIndexExecution(t *testing.T) { +// func Test_ngt_UUIDs(t *testing.T) { +// type args struct { +// ctx context.Context +// } // type fields struct { // core core.NGT // eg errgroup.Group @@ -13884,19 +13897,20 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want uint64 +// wantUuids []string // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, uint64) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, []string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got uint64) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotUuids []string) error { +// if !reflect.DeepEqual(gotUuids, w.wantUuids) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotUuids, w.wantUuids) // } // return nil // } @@ -13905,6 +13919,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -13956,10 +13973,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -13970,6 +13987,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -14021,10 +14041,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -14038,10 +14058,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -14097,15 +14117,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.NumberOfCreateIndexExecution() -// if err := checkFunc(test.want, got); err != nil { +// gotUuids := n.UUIDs(test.args.ctx) +// if err := checkFunc(test.want, gotUuids); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_NumberOfProactiveGCExecution(t *testing.T) { +// func Test_ngt_NumberOfCreateIndexExecution(t 
*testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14369,7 +14389,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.NumberOfProactiveGCExecution() +// got := n.NumberOfCreateIndexExecution() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -14377,7 +14397,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_lastNumberOfCreateIndexExecution(t *testing.T) { +// func Test_ngt_NumberOfProactiveGCExecution(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14641,7 +14661,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.lastNumberOfCreateIndexExecution() +// got := n.NumberOfProactiveGCExecution() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -14649,7 +14669,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_gc(t *testing.T) { +// func Test_ngt_lastNumberOfCreateIndexExecution(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14699,16 +14719,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct{} +// type want struct { +// want uint64 +// } // type test struct { // name string // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, uint64) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } // return nil // } // tests := []test{ @@ -14908,15 +14933,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// n.gc() -// if err := checkFunc(test.want); err != nil { +// got := n.lastNumberOfCreateIndexExecution() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_Len(t *testing.T) { +// func Test_ngt_gc(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14966,21 +14991,16 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct { -// want uint64 -// } +// type want struct{} // type test struct { // name string // fields fields // want want -// checkFunc func(want, uint64) error +// checkFunc func(want) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -15180,15 +15200,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.Len() -// if err := checkFunc(test.want, got); err != nil { +// n.gc() +// if err := checkFunc(test.want); err != 
nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_InsertVQueueBufferLen(t *testing.T) { +// func Test_ngt_Len(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -15452,7 +15472,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.InsertVQueueBufferLen() +// got := n.Len() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -15460,7 +15480,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteVQueueBufferLen(t *testing.T) { +// func Test_ngt_InsertVQueueBufferLen(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -15724,7 +15744,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.DeleteVQueueBufferLen() +// got := n.InsertVQueueBufferLen() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -15732,7 +15752,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_GetDimensionSize(t *testing.T) { +// func Test_ngt_DeleteVQueueBufferLen(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -15783,17 +15803,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want int +// want uint64 // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, int) error +// checkFunc func(want, uint64) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got int) error { +// defaultCheckFunc := func(w want, got uint64) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -15996,7 +16016,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.GetDimensionSize() +// got := n.DeleteVQueueBufferLen() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -16004,7 +16024,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_BrokenIndexCount(t *testing.T) { +// func Test_ngt_GetDimensionSize(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -16055,17 +16075,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want uint64 +// want int // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, uint64) error +// checkFunc func(want, int) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { +// defaultCheckFunc := func(w want, got int) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -16268,7 +16288,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.BrokenIndexCount() +// got := n.GetDimensionSize() // if err := checkFunc(test.want, got); err 
!= nil { // tt.Errorf("error = %v", err) // } @@ -16276,11 +16296,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_ListObjectFunc(t *testing.T) { -// type args struct { -// ctx context.Context -// f func(uuid string, oid uint32, ts int64) bool -// } +// func Test_ngt_BrokenIndexCount(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -16330,17 +16346,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct{} +// type want struct { +// want uint64 +// } // type test struct { // name string -// args args // fields fields // want want -// checkFunc func(want) error -// beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// checkFunc func(want, uint64) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } // return nil // } // tests := []test{ @@ -16348,10 +16368,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// ctx:nil, -// f:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -16403,10 +16419,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -16417,10 +16433,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", -// args: args { -// ctx:nil, -// f:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -16472,10 +16484,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -16489,10 +16501,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -16548,15 +16560,571 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// n.ListObjectFunc(test.args.ctx, test.args.f) -// if err := checkFunc(test.want); err != nil { +// got := n.BrokenIndexCount() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_IndexStatistics(t *testing.T) { +// func Test_ngt_ListObjectFunc(t *testing.T) { +// type args struct { +// ctx context.Context +// f func(uuid string, oid uint32, ts int64) bool +// } +// type fields struct { +// core core.NGT +// eg errgroup.Group +// kvs kvs.BidiMap +// 
fmap map[string]int64 +// vq vqueue.Queue +// indexing atomic.Value +// flushing atomic.Bool +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// nobic uint64 +// nopvq atomic.Uint64 +// cfg *config.NGT +// opts []Option +// inMem bool +// dim int +// alen int +// lim time.Duration +// dur time.Duration +// sdur time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// podName string +// podNamespace string +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// brokenPath string +// poolSize uint32 +// radius float32 +// epsilon float32 +// idelay time.Duration +// dcd bool +// kvsdbConcurrency int +// historyLimit int +// isReadReplica bool +// enableExportIndexInfo bool +// exportIndexInfoDuration time.Duration +// patcher client.Patcher +// enableStatistics bool +// statisticsCache atomic.Pointer[payload.Info_Index_Statistics] +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// f:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// f:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, 
args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// n := &ngt{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// indexing: test.fields.indexing, +// flushing: test.fields.flushing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// nobic: test.fields.nobic, +// nopvq: test.fields.nopvq, +// cfg: test.fields.cfg, +// opts: test.fields.opts, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// alen: test.fields.alen, +// lim: test.fields.lim, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// podName: test.fields.podName, +// podNamespace: test.fields.podNamespace, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// brokenPath: test.fields.brokenPath, +// poolSize: test.fields.poolSize, +// radius: test.fields.radius, +// epsilon: test.fields.epsilon, +// idelay: test.fields.idelay, +// dcd: test.fields.dcd, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// historyLimit: test.fields.historyLimit, +// isReadReplica: test.fields.isReadReplica, +// enableExportIndexInfo: test.fields.enableExportIndexInfo, +// exportIndexInfoDuration: test.fields.exportIndexInfoDuration, +// patcher: test.fields.patcher, +// enableStatistics: test.fields.enableStatistics, +// statisticsCache: test.fields.statisticsCache, +// } +// +// n.ListObjectFunc(test.args.ctx, test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_ngt_IndexStatistics(t *testing.T) { +// type fields struct { +// core core.NGT +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// indexing atomic.Value +// flushing atomic.Bool +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// nobic uint64 +// nopvq atomic.Uint64 +// cfg *config.NGT +// opts []Option +// inMem bool +// dim int +// alen int +// lim time.Duration +// dur time.Duration +// sdur time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// podName string +// podNamespace string +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// brokenPath string +// poolSize uint32 +// radius float32 +// epsilon float32 +// idelay time.Duration +// dcd bool +// kvsdbConcurrency int +// historyLimit int +// isReadReplica bool +// enableExportIndexInfo bool +// exportIndexInfoDuration time.Duration +// patcher client.Patcher +// enableStatistics bool +// statisticsCache atomic.Pointer[payload.Info_Index_Statistics] +// } 
+// type want struct { +// wantStats *payload.Info_Index_Statistics +// err error +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_Statistics, error) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, gotStats *payload.Info_Index_Statistics, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotStats, w.wantStats) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotStats, w.wantStats) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// n := &ngt{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// indexing: test.fields.indexing, +// flushing: test.fields.flushing, +// saving: 
test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// nobic: test.fields.nobic, +// nopvq: test.fields.nopvq, +// cfg: test.fields.cfg, +// opts: test.fields.opts, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// alen: test.fields.alen, +// lim: test.fields.lim, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// podName: test.fields.podName, +// podNamespace: test.fields.podNamespace, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// brokenPath: test.fields.brokenPath, +// poolSize: test.fields.poolSize, +// radius: test.fields.radius, +// epsilon: test.fields.epsilon, +// idelay: test.fields.idelay, +// dcd: test.fields.dcd, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// historyLimit: test.fields.historyLimit, +// isReadReplica: test.fields.isReadReplica, +// enableExportIndexInfo: test.fields.enableExportIndexInfo, +// exportIndexInfoDuration: test.fields.exportIndexInfoDuration, +// patcher: test.fields.patcher, +// enableStatistics: test.fields.enableStatistics, +// statisticsCache: test.fields.statisticsCache, +// } +// +// gotStats, err := n.IndexStatistics() +// if err := checkFunc(test.want, gotStats, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_ngt_IsStatisticsEnabled(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -16607,23 +17175,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantStats *payload.Info_Index_Statistics -// err error +// want bool // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, *payload.Info_Index_Statistics, error) error +// checkFunc func(want, bool) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, gotStats *payload.Info_Index_Statistics, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) -// } -// if !reflect.DeepEqual(gotStats, w.wantStats) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotStats, w.wantStats) +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } // return nil // } @@ -16824,15 +17388,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotStats, err := n.IndexStatistics() -// if err := checkFunc(test.want, gotStats, err); err != nil { +// got := n.IsStatisticsEnabled() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_IsStatisticsEnabled(t *testing.T) { +// func Test_ngt_IndexProperty(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -16883,17 +17447,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want 
struct { -// want bool +// want *payload.Info_Index_Property +// err error // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, bool) error +// checkFunc func(want, *payload.Info_Index_Property, error) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got bool) error { +// defaultCheckFunc := func(w want, got *payload.Info_Index_Property, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -17096,8 +17664,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsStatisticsEnabled() -// if err := checkFunc(test.want, got); err != nil { +// got, err := n.IndexProperty() +// if err := checkFunc(test.want, got, err); err != nil { // tt.Errorf("error = %v", err) // } // }) diff --git a/pkg/agent/internal/kvs/kvs.go b/pkg/agent/internal/kvs/kvs.go index f0c58612b9..1ca56b6bdd 100644 --- a/pkg/agent/internal/kvs/kvs.go +++ b/pkg/agent/internal/kvs/kvs.go @@ -51,10 +51,10 @@ type ValueStructUo struct { } type bidi struct { - concurrency int - l uint64 ou [slen]*sync.Map[uint32, valueStructOu] uo [slen]*sync.Map[string, ValueStructUo] + concurrency int + l uint64 } const ( @@ -64,6 +64,8 @@ const ( // mask is slen-1 Hex value. mask = 0x1FF // mask = 0xFFF. + + maxHashKeyLength = slen / 2 ) // New returns the bidi that satisfies the BidiMap interface. @@ -186,8 +188,8 @@ func (b *bidi) Close() error { } func getShardID(key string) (id uint64) { - if len(key) > 128 { - return xxh3.HashString(key[:128]) & mask + if len(key) > maxHashKeyLength { + return xxh3.HashString(key[:maxHashKeyLength]) & mask } return xxh3.HashString(key) & mask } diff --git a/pkg/agent/internal/kvs/kvs_test.go b/pkg/agent/internal/kvs/kvs_test.go index c9dbd9db14..7361bf5c99 100644 --- a/pkg/agent/internal/kvs/kvs_test.go +++ b/pkg/agent/internal/kvs/kvs_test.go @@ -229,7 +229,7 @@ func Test_bidi_Get(t *testing.T) { ) return test{ - name: "return the value when there is a value for the key and l of fields is maximun value of uint64", + name: "return the value when there is a value for the key and l of fields is maximum value of uint64", args: args{ key: key, }, @@ -496,7 +496,7 @@ func Test_bidi_GetInverse(t *testing.T) { ) return test{ - name: "return key and timestamp and true when there is a key for the value and l of fields is maximun value of uint64", + name: "return key and timestamp and true when there is a key for the value and l of fields is maximum value of uint64", args: args{ val: val, }, @@ -770,7 +770,7 @@ func Test_bidi_Set(t *testing.T) { ) return test{ - name: "set success when the key is not empty string and val is not 0 and l of fields is maximun value of uint64", + name: "set success when the key is not empty string and val is not 0 and l of fields is maximum value of uint64", args: args{ key: key, val: val, @@ -806,7 +806,7 @@ func Test_bidi_Set(t *testing.T) { ) return test{ - name: "set success when the key is already set and the same key is set twie", + name: "set success when the key is already set and the same key is set twice", args: args{ key: key, val: val, @@ -1071,7 +1071,7 @@ func Test_bidi_Delete(t *testing.T) { ) return test{ - name: "return val and true when the delete successes and l of fields 
is maximun value of uint64", + name: "return val and true when the delete successes and l of fields is maximum value of uint64", args: args{ key: key, }, @@ -1353,7 +1353,7 @@ func Test_bidi_DeleteInverse(t *testing.T) { ) return test{ - name: "return key and true when the delete successes and l of fields is maximun value of uint64", + name: "return key and true when the delete successes and l of fields is maximum value of uint64", args: args{ val: val, }, @@ -1644,7 +1644,7 @@ func Test_bidi_Range(t *testing.T) { var mu sync.Mutex return test{ - name: "rage get successes when l of fields is maximun value of uint64", + name: "rage get successes when l of fields is maximum value of uint64", args: args{ f: func(s string, u uint32, t int64) bool { mu.Lock() @@ -1743,7 +1743,7 @@ func Test_bidi_Len(t *testing.T) { }, }, { - name: "return maximun value when l of field is maximun value of uint64", + name: "return maximum value when l of field is maximum value of uint64", fields: fields{ l: math.MaxUint64, }, @@ -1793,10 +1793,10 @@ func Test_bidi_Len(t *testing.T) { // // func Test_bidi_Close(t *testing.T) { // type fields struct { -// concurrency int -// l uint64 // ou [slen]*sync.Map[uint32, valueStructOu] // uo [slen]*sync.Map[string, ValueStructUo] +// concurrency int +// l uint64 // } // type want struct { // err error @@ -1821,10 +1821,10 @@ func Test_bidi_Len(t *testing.T) { // { // name: "test_case_1", // fields: fields { -// concurrency:0, -// l:0, // ou:nil, // uo:nil, +// concurrency:0, +// l:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1843,10 +1843,10 @@ func Test_bidi_Len(t *testing.T) { // return test { // name: "test_case_2", // fields: fields { -// concurrency:0, -// l:0, // ou:nil, // uo:nil, +// concurrency:0, +// l:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1877,10 +1877,10 @@ func Test_bidi_Len(t *testing.T) { // checkFunc = defaultCheckFunc // } // b := &bidi{ -// concurrency: test.fields.concurrency, -// l: test.fields.l, // ou: test.fields.ou, // uo: test.fields.uo, +// concurrency: test.fields.concurrency, +// l: test.fields.l, // } // // err := b.Close() diff --git a/pkg/agent/internal/memstore/data_manager.go b/pkg/agent/internal/memstore/data_manager.go new file mode 100644 index 0000000000..c32a692a5d --- /dev/null +++ b/pkg/agent/internal/memstore/data_manager.go @@ -0,0 +1,217 @@ +// +// Copyright (C) 2019-2024 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package memstore + +import ( + "context" + + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/sync" + "github.com/vdaas/vald/pkg/agent/internal/kvs" + "github.com/vdaas/vald/pkg/agent/internal/vqueue" +) + +func Exists(kv kvs.BidiMap, vq vqueue.Queue, uuid string) (oid uint32, ok bool) { + var its, dts, kts int64 + _, its, dts, ok = vq.GetVectorWithTimestamp(uuid) + if !ok { + oid, kts, ok = kv.Get(uuid) + if !ok { + log.Debugf("Exists\tuuid: %s's data not found in kvsdb and insert vqueue\terror: %v", uuid, errors.ErrObjectIDNotFound(uuid)) + return 0, false + } + if kts < its { + kv.Set(uuid, oid, its) + } + if dts > its { + log.Debugf( + "Exists\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. the object will be delete soon\terror: %v", + uuid, + errors.ErrObjectIDNotFound(uuid), + ) + return 0, false + } + } + return oid, ok +} + +func GetObject( + kv kvs.BidiMap, vq vqueue.Queue, uuid string, getVectorFn func(oid uint32) ([]float32, error), +) (vec []float32, timestamp int64, err error) { + vec, its, dts, exists := vq.GetVectorWithTimestamp(uuid) + if exists { + return vec, its, nil + } + + oid, kts, ok := kv.Get(uuid) + if !ok { + log.Debugf("GetObject\tuuid: %s's data not found in kvsdb and insert vqueue", uuid) + return nil, 0, errors.ErrObjectIDNotFound(uuid) + } + + if kts < its { + kv.Set(uuid, oid, its) + } + + if ok && dts > its { + log.Debugf("GetObject\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. the object will be delete soon", uuid) + return nil, 0, errors.ErrObjectIDNotFound(uuid) + } + + if getVectorFn == nil { + return nil, kts, nil + } + + vec, err = getVectorFn(oid) + if err != nil { + log.Debugf("GetObject\tuuid: %s oid: %d's vector not found in ngt index", uuid, oid) + return nil, 0, errors.ErrObjectNotFound(err, uuid) + } + + return vec, kts, nil +} + +// ListObjectFunc applies the input function on each index stored in the kvs and vqueue. +// Use this function for performing something on each object with caring about the memory usage. +// If the vector exists in the vqueue, this vector is not indexed so the oid(object ID) is processed as 0. 
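// Editor's note: the block below is an illustrative sketch added alongside this patch, not part of
// it. It shows how a caller might resolve an object through the new memstore helpers: the insert
// vqueue wins while it still holds the uuid, otherwise the kvs object ID is dereferenced through
// the supplied getVectorFn. The variables kv, vq, uuid and the lookupIndexedVector closure are
// assumptions for the example, not identifiers from this change.
//
//	vec, ts, err := memstore.GetObject(kv, vq, uuid, func(oid uint32) ([]float32, error) {
//		// Only consulted when the vector is no longer queued, i.e. it already lives in the index.
//		return lookupIndexedVector(oid) // hypothetical index lookup
//	})
//	if err != nil {
//		// Either the uuid is unknown to both stores (ErrObjectIDNotFound) or the index lookup
//		// failed (ErrObjectNotFound wrapping the lookup error).
//		return err
//	}
//	_ = vec // vector taken from the insert vqueue or from the index
//	_ = ts  // timestamp from the insert vqueue when queued, otherwise from the kvs
//
// Passing nil as getVectorFn skips the index lookup and returns only the kvs timestamp, which is
// useful when the caller only needs to know when the object was last written.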
+func ListObjectFunc( + ctx context.Context, + kv kvs.BidiMap, + vq vqueue.Queue, + f func(uuid string, oid uint32, ts int64) bool, +) { + dup := make(map[string]bool, max(vq.DVQLen(), 3)/3) + vq.Range(ctx, func(uuid string, vec []float32, ts int64) (ok bool) { + oid, kts, ok := kv.Get(uuid) + if ok { // exists same data ikv + if ts > kts { // exist ikv but vq is newer + dup[uuid] = true + return f(uuid, oid, ts) + } + // exist in kv and kvs data is newer thavqueue skip and process it at kvs.Range + return true + } + // not exist in kv + return f(uuid, 0, ts) + }) + kv.Range(ctx, func(uuid string, oid uint32, ts int64) (ok bool) { + if dup[uuid] { + return true + } + // if delete vqueue data exists and timestamp of dvq is newer which means data will be delete soon, then skip process + dts, ok := vq.DVExists(uuid) + if ok && dts != 0 { + return true + } + return f(uuid, oid, ts) + }) +} + +func UUIDs(ctx context.Context, kv kvs.BidiMap, vq vqueue.Queue) (uuids []string) { + uuids = make([]string, 0, kv.Len()+uint64(vq.IVQLen())-uint64(vq.DVQLen())) + var mu sync.Mutex + ListObjectFunc(ctx, kv, vq, func(uuid string, oid uint32, _ int64) bool { + mu.Lock() + uuids = append(uuids, uuid) + mu.Unlock() + return true + }) + return uuids +} + +// UpdateTimestamp updates memstore(kvs, vqueue) data's timestamp +func UpdateTimestamp( + kv kvs.BidiMap, + vq vqueue.Queue, + uuid string, + ts int64, + force bool, + getVectorFn func(oid uint32) ([]float32, error), +) (err error) { + if len(uuid) == 0 { + return errors.ErrUUIDNotFound(0) // invalid uuid, we can't check any object without uuid + } + if !force && ts <= 0 { + return errors.ErrZeroTimestamp + } + vec, its, dts, vqok := vq.GetVectorWithTimestamp(uuid) // read insert/delete vqueue data + oid, kts, kvok := kv.Get(uuid) // read kvs data + if !vqok && !kvok { + return errors.ErrObjectNotFound(nil, uuid) // no object in memstore then return NotFound + } + if !force && (ts <= kts || ts <= its) { + return errors.ErrNewerTimestampObjectAlreadyExists(uuid, ts) // no old object found in this memstore + } + switch { + case vqok && !kvok && dts != 0 && dts < ts && (force || its < ts): + // if only found from vqueue and timestamp is newer than delete-vqueue-timestamp(dts) + // update insert-vqueue first + err = vq.PushInsert(uuid, vec, ts) + if err != nil { + return err + } + pdts, ok := vq.PopDelete(uuid) // there is no kvs data and ts is newer than dts which means we don't need to delete processing for uuid this time + if ok && pdts != dts { + // if time difference detected the data might be changed by another thread so we need to rollback + return vq.PushDelete(uuid, pdts) + } + return nil // succesfully update the vqueue + case vqok && kvok && dts < ts && (force || (kts < ts && its < ts)): + // if vqueue data exists and new timestamp never delete and force-update or timestamp is newer than insert queue timestamp + // update insert-vqueue first + err = vq.PushInsert(uuid, vec, ts) + if err != nil { + return err + } + // if updated insert-vqueue and data exists ikvdb and it's timestamp is older than query, update kvs data + kv.Set(uuid, oid, ts) + if dts == 0 { // if kvs data exists but not found delete-vqueue data it would be better to add delete vqueue for update + return vq.PushDelete(uuid, ts-1) + } + return nil // succesfully update the vqueue and kvs + case !vqok && its == 0 && kvok && (force || kts < ts): + // if insert-vqueue not found and kvs data found just update kvs data + kv.Set(uuid, oid, ts) + if dts != 0 && (force || dts < ts) { + // if 
delete-vqueue found and ts is newer than delete timestamp and kvs timestamp, should update kvs and remove delete-vqueue + pdts, ok := vq.PopDelete(uuid) + if ok && pdts != dts { + // if a time difference is detected, the data might have been changed by another thread, so we need to roll back + return vq.PushDelete(uuid, pdts) // successfully updated the kvs but failed to dequeue the delete-vqueue entry, so it was rolled back + } + return nil // successfully updated the kvs and delete-vqueue + } + return nil // successfully updated the kvs + case !vqok && its != 0 && kvok && (force || kts < ts): + // if an insert-vqueue entry exists even though vqok is false (the two cases are vec == nil or dts > its), check kvok, update the kvs, and remove the insert-vqueue entry + kv.Set(uuid, oid, ts) + if vec == nil && its > dts && getVectorFn != nil { + ovec, err := getVectorFn(oid) + if err == nil && ovec != nil { + return vq.PushInsert(uuid, ovec, ts) + } + } + pvec, pits, ok := vq.PopInsert(uuid) + if pvec != nil && ok && pits != its { + // if a time difference is detected, the data might have been changed by another thread, so we need to roll back + return vq.PushInsert(uuid, pvec, pits) + } + return nil // successfully updated the kvs + } + return errors.ErrNothingToBeDoneForUpdate(uuid) +} diff --git a/pkg/agent/internal/memstore/data_manager_test.go b/pkg/agent/internal/memstore/data_manager_test.go new file mode 100644 index 0000000000..3c4450bdfc --- /dev/null +++ b/pkg/agent/internal/memstore/data_manager_test.go @@ -0,0 +1,493 @@ +// Copyright (C) 2019-2024 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
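// Editor's note: the block below is an illustrative sketch added alongside this patch, not part of
// it. It shows the intended call shape of the new memstore.UpdateTimestamp helper; kv, vq, uuid
// and the lookupIndexedVector closure are assumptions for the example, and ts is expected to be on
// the same clock the vqueue uses (UnixNano when the queue fills the timestamp in itself).
//
//	// Move uuid's stored timestamp forward to ts; force additionally allows rewinding it.
//	err := memstore.UpdateTimestamp(kv, vq, uuid, ts, false, func(oid uint32) ([]float32, error) {
//		// Used to re-queue the indexed vector when the insert vqueue no longer holds one.
//		return lookupIndexedVector(oid) // hypothetical index lookup
//	})
//	if err != nil {
//		// Failures surfaced by the helper above: ErrUUIDNotFound for an empty uuid,
//		// ErrZeroTimestamp for ts <= 0 without force, ErrObjectNotFound when the uuid is in
//		// neither the kvs nor the vqueue, ErrNewerTimestampObjectAlreadyExists when the stored
//		// data is already as new as ts, and ErrNothingToBeDoneForUpdate when no case applies.
//		return err
//	}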
+package memstore + +// NOT IMPLEMENTED BELOW +// +// func TestExists(t *testing.T) { +// type args struct { +// kv kvs.BidiMap +// vq vqueue.Queue +// uuid string +// } +// type want struct { +// wantOid uint32 +// wantOk bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, uint32, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotOid uint32, gotOk bool) error { +// if !reflect.DeepEqual(gotOid, w.wantOid) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// kv:nil, +// vq:nil, +// uuid:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// kv:nil, +// vq:nil, +// uuid:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotOid, gotOk := Exists(test.args.kv, test.args.vq, test.args.uuid) +// if err := checkFunc(test.want, gotOid, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestGetObject(t *testing.T) { +// type args struct { +// kv kvs.BidiMap +// vq vqueue.Queue +// uuid string +// getVectorFn func(oid uint32) ([]float32, error) +// } +// type want struct { +// wantVec []float32 +// wantTimestamp int64 +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, []float32, int64, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// kv:nil, +// vq:nil, +// uuid:"", +// getVectorFn:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { 
+// return test { +// name: "test_case_2", +// args: args { +// kv:nil, +// vq:nil, +// uuid:"", +// getVectorFn:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotVec, gotTimestamp, err := GetObject(test.args.kv, test.args.vq, test.args.uuid, test.args.getVectorFn) +// if err := checkFunc(test.want, gotVec, gotTimestamp, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestListObjectFunc(t *testing.T) { +// type args struct { +// ctx context.Context +// kv kvs.BidiMap +// vq vqueue.Queue +// f func(uuid string, oid uint32, ts int64) bool +// } +// type want struct{} +// type test struct { +// name string +// args args +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// kv:nil, +// vq:nil, +// f:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// kv:nil, +// vq:nil, +// f:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// ListObjectFunc(test.args.ctx, test.args.kv, test.args.vq, test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestUUIDs(t *testing.T) { +// type args struct { +// ctx context.Context +// kv kvs.BidiMap +// vq vqueue.Queue +// } +// type want struct { +// wantUuids []string +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, []string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotUuids []string) error { +// if !reflect.DeepEqual(gotUuids, w.wantUuids) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotUuids, w.wantUuids) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// kv:nil, +// vq:nil, 
+// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// kv:nil, +// vq:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotUuids := UUIDs(test.args.ctx, test.args.kv, test.args.vq) +// if err := checkFunc(test.want, gotUuids); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestUpdateTimestamp(t *testing.T) { +// type args struct { +// kv kvs.BidiMap +// vq vqueue.Queue +// uuid string +// ts int64 +// force bool +// getVectorFn func(oid uint32) ([]float32, error) +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// kv:nil, +// vq:nil, +// uuid:"", +// ts:0, +// force:false, +// getVectorFn:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// kv:nil, +// vq:nil, +// uuid:"", +// ts:0, +// force:false, +// getVectorFn:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// err := UpdateTimestamp(test.args.kv, test.args.vq, test.args.uuid, test.args.ts, test.args.force, test.args.getVectorFn) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/agent/internal/vqueue/queue.go b/pkg/agent/internal/vqueue/queue.go index ae073c5298..78a0b99032 100644 --- a/pkg/agent/internal/vqueue/queue.go +++ b/pkg/agent/internal/vqueue/queue.go @@ -32,14 +32,17 @@ import ( // Queue 
represents vector queue cache interface. type Queue interface { - PushInsert(uuid string, vector []float32, date int64) error - PushDelete(uuid string, date int64) error + PushInsert(uuid string, vector []float32, timestamp int64) error + PushDelete(uuid string, timestamp int64) error + PopInsert(uuid string) (vector []float32, timestamp int64, ok bool) + PopDelete(uuid string) (timestamp int64, ok bool) GetVector(uuid string) (vec []float32, timestamp int64, exists bool) Range(ctx context.Context, f func(uuid string, vector []float32, ts int64) bool) - RangePopInsert(ctx context.Context, now int64, f func(uuid string, vector []float32, date int64) bool) + GetVectorWithTimestamp(uuid string) (vec []float32, its, dts int64, exists bool) + RangePopInsert(ctx context.Context, now int64, f func(uuid string, vector []float32, timestamp int64) bool) RangePopDelete(ctx context.Context, now int64, f func(uuid string) bool) - IVExists(uuid string) bool - DVExists(uuid string) bool + IVExists(uuid string) (timestamp int64, ok bool) + DVExists(uuid string) (timestamp int64, ok bool) IVQLen() int DVQLen() int } @@ -50,9 +53,9 @@ type vqueue struct { } type index struct { - date int64 - vector []float32 - uuid string + uuid string + vector []float32 + timestamp int64 } func New(opts ...Option) (Queue, error) { @@ -72,22 +75,25 @@ func New(opts ...Option) (Queue, error) { return vq, nil } -func (v *vqueue) PushInsert(uuid string, vector []float32, date int64) error { - if date == 0 { - date = time.Now().UnixNano() +func (v *vqueue) PushInsert(uuid string, vector []float32, timestamp int64) error { + if len(uuid) == 0 || vector == nil { + return nil + } + if timestamp == 0 { + timestamp = time.Now().UnixNano() } - didx, ok := v.dl.Load(uuid) - if ok && didx.date > date { + dts, ok := v.loadDVQ(uuid) + if ok && newer(dts, timestamp) { return nil } idx := index{ - uuid: uuid, - vector: vector, - date: date, + uuid: uuid, + vector: vector, + timestamp: timestamp, } oidx, loaded := v.il.LoadOrStore(uuid, &idx) if loaded { - if date > oidx.date { // if data already exists and existing index is older than new one + if newer(timestamp, oidx.timestamp) { // if data already exists and existing index is older than new one v.il.Store(uuid, &idx) } } else { @@ -96,17 +102,20 @@ func (v *vqueue) PushInsert(uuid string, vector []float32, date int64) error { return nil } -func (v *vqueue) PushDelete(uuid string, date int64) error { - if date == 0 { - date = time.Now().UnixNano() +func (v *vqueue) PushDelete(uuid string, timestamp int64) error { + if len(uuid) == 0 { + return nil + } + if timestamp == 0 { + timestamp = time.Now().UnixNano() } idx := index{ - uuid: uuid, - date: date, + uuid: uuid, + timestamp: timestamp, } oidx, loaded := v.dl.LoadOrStore(uuid, &idx) if loaded { - if date > oidx.date { // if data already exists and existing index is older than new one + if newer(timestamp, oidx.timestamp) { // if data already exists and existing index is older than new one v.dl.Store(uuid, &idx) } } else { @@ -115,81 +124,103 @@ func (v *vqueue) PushDelete(uuid string, date int64) error { return nil } +func (v *vqueue) PopInsert(uuid string) (vector []float32, timestamp int64, ok bool) { + var idx *index + idx, ok = v.il.LoadAndDelete(uuid) + if !ok || idx == nil || idx.timestamp == 0 { + return nil, 0, false + } + _ = atomic.AddUint64(&v.ic, ^uint64(0)) + return idx.vector, idx.timestamp, ok +} + +func (v *vqueue) PopDelete(uuid string) (timestamp int64, ok bool) { + var idx *index + idx, ok = v.dl.LoadAndDelete(uuid) 
+ if !ok || idx == nil || idx.timestamp == 0 { + return 0, false + } + _ = atomic.AddUint64(&v.dc, ^uint64(0)) + return idx.timestamp, ok +} + // GetVector returns the vector stored in the queue. +func (v *vqueue) GetVector(uuid string) (vec []float32, timestamp int64, exists bool) { + vec, timestamp, _, exists = v.getVector(uuid, false) + return vec, timestamp, exists +} + +// GetVectorWithTimestamp returns the vector and timestamps stored in the queue. +func (v *vqueue) GetVectorWithTimestamp(uuid string) (vec []float32, its, dts int64, exists bool) { + return v.getVector(uuid, true) +} + +// getVector returns the vector and timestamps stored in the queue. // If the same UUID exists in the insert queue and the delete queue, the timestamp is compared. // And the vector is returned if the timestamp in the insert queue is newer than the delete queue. -func (v *vqueue) GetVector(uuid string) (vec []float32, timestamp int64, exists bool) { - idx, ok := v.il.Load(uuid) - if !ok { - // data not in the insert queue then return not exists(false) - return nil, 0, false +func (v *vqueue) getVector( + uuid string, enableDeleteTimestamp bool, +) (vec []float32, its, dts int64, ok bool) { + vec, its, ok = v.loadIVQ(uuid) + if !ok || vec == nil { + if !enableDeleteTimestamp { + // data not in the insert queue then return not exists(false) + return nil, 0, 0, false + } + dts, ok = v.loadDVQ(uuid) + if !ok || dts == 0 { + // data not in the delete queue and insert queue then return not exists(false) + return nil, 0, 0, false + } + // data not in the insert queue but exists in the delete queue then return not exists(false) with the delete index timestamp + return nil, 0, dts, false } - didx, ok := v.dl.Load(uuid) - if !ok { + dts, ok = v.loadDVQ(uuid) + if !ok || dts == 0 { // data not in the delete queue but exists in insert queue then return exists(true) - return idx.vector, idx.date, true + return vec, its, 0, vec != nil // usually vec is non-nil which means true } - // data exists both queue, compare data timestamp if insert queue timestamp is newer than delete one, this function returns exists(true) - if didx.date <= idx.date { - return idx.vector, idx.date, true - } - return nil, 0, false + // data exists in both queues, compare the timestamps; if the insert queue timestamp is newer than the delete one, the last return value will be true + // However, if insert and delete are sent by the update instruction, the timestamp will be the same + return vec, its, dts, vec != nil && newer(its, dts) // usually vec is non-nil } -// IVExists returns true if there is the UUID in the insert queue. +// IVExists returns timestamp of iv and true if there is the UUID in the insert queue. // If the same UUID exists in the insert queue and the delete queue, the timestamp is compared. // And the true is returned if the timestamp in the insert queue is newer than the delete queue.
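Read together, the four return values of getVector mean: ok reports whether the insert entry is still effective, while its and dts expose the raw insert/delete timestamps. A minimal sketch against the Queue interface above illustrates the intended interpretation (the helper name, uuid, and timestamp values are illustrative assumptions):

// exampleTimestampResolution is an illustrative, hypothetical helper showing how
// the timestamps returned by the new signatures decide whether a vector is visible.
func exampleTimestampResolution(vq Queue) {
	_ = vq.PushInsert("uuid-1", []float32{0.1, 0.2}, 100) // insert entry with ts=100
	_ = vq.PushDelete("uuid-1", 50)                       // delete entry with ts=50 (older)

	vec, its, dts, ok := vq.GetVectorWithTimestamp("uuid-1")
	// ok is true because newer(its, dts) holds (100 > 50); vec is the pushed vector,
	// and its == 100, dts == 50 are both reported so callers can reason about either queue.
	_, _, _, _ = vec, its, dts, ok

	if its, ok := vq.IVExists("uuid-1"); ok {
		_ = its // the insert entry wins, so IVExists reports (100, true)
	}
	if dts, ok := vq.DVExists("uuid-1"); !ok {
		_ = dts // the delete entry is masked by the newer insert, so DVExists reports (0, false)
	}
}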
-func (v *vqueue) IVExists(uuid string) bool { - idx, ok := v.il.Load(uuid) - if !ok { - // data not in the insert queue then return not exists(false) - return false - } - didx, ok := v.dl.Load(uuid) - if !ok { - // data not in the delete queue but exists in insert queue then return exists(true) - return true +func (v *vqueue) IVExists(uuid string) (its int64, ok bool) { + _, its, _, ok = v.getVector(uuid, false) + if !ok || its == 0 { + return 0, false } - // data exists both queue, compare data timestamp if insert queue timestamp is newer than delete one, this function returns exists(true) - // However, if insert and delete are sent by the update instruction, the timestamp will be the same - return didx.date <= idx.date + return its, true } -// DVExists returns true if there is the UUID in the delete queue. +// DVExists returns timestamp of dv and true if there is the UUID in the delete queue. // If the same UUID exists in the insert queue and the delete queue, the timestamp is compared. // And the true is returned if the timestamp in the delete queue is newer than the insert queue. -func (v *vqueue) DVExists(uuid string) bool { - didx, ok := v.dl.Load(uuid) - if !ok { - return false - } - idx, ok := v.il.Load(uuid) - if !ok { - // data not in the insert queue then return not exists(false) - return true +func (v *vqueue) DVExists(uuid string) (dts int64, ok bool) { + _, _, dts, ok = v.getVector(uuid, true) + if ok || dts == 0 { + return 0, false } - - // data exists both queue, compare data timestamp if insert queue timestamp is newer than delete one, this function returns exists(true) - return didx.date > idx.date + return dts, true } func (v *vqueue) RangePopInsert( - ctx context.Context, now int64, f func(uuid string, vector []float32, date int64) bool, + ctx context.Context, now int64, f func(uuid string, vector []float32, timestamp int64) bool, ) { uii := make([]index, 0, atomic.LoadUint64(&v.ic)) defer func() { uii = nil }() v.il.Range(func(uuid string, idx *index) bool { - if idx.date > now { + if newer(idx.timestamp, now) { return true } - didx, ok := v.dl.Load(uuid) - if ok { - if idx.date < didx.date { - v.il.Delete(idx.uuid) - atomic.AddUint64(&v.ic, ^uint64(0)) - } + dts, ok := v.loadDVQ(uuid) + if ok && newer(dts, idx.timestamp) { + _, _, _ = v.PopInsert(uuid) return true } uii = append(uii, *idx) @@ -201,14 +232,14 @@ func (v *vqueue) RangePopInsert( return true }) slices.SortFunc(uii, func(left, right index) int { - return cmp.Compare(right.date, left.date) + return cmp.Compare(right.timestamp, left.timestamp) }) for _, idx := range uii { - if !f(idx.uuid, idx.vector, idx.date) { + if !f(idx.uuid, idx.vector, idx.timestamp) { return } - v.il.Delete(idx.uuid) - atomic.AddUint64(&v.ic, ^uint64(0)) + + _, _, _ = v.PopInsert(idx.uuid) select { case <-ctx.Done(): return @@ -223,7 +254,7 @@ func (v *vqueue) RangePopDelete(ctx context.Context, now int64, f func(uuid stri udi = nil }() v.dl.Range(func(_ string, idx *index) bool { - if idx.date > now { + if newer(idx.timestamp, now) { return true } udi = append(udi, *idx) @@ -235,18 +266,16 @@ func (v *vqueue) RangePopDelete(ctx context.Context, now int64, f func(uuid stri return true }) slices.SortFunc(udi, func(left, right index) int { - return cmp.Compare(right.date, left.date) + return cmp.Compare(right.timestamp, left.timestamp) }) - for _, idx := range udi { - if !f(idx.uuid) { + for _, didx := range udi { + if !f(didx.uuid) { return } - v.dl.Delete(idx.uuid) - atomic.AddUint64(&v.dc, ^uint64(0)) - iidx, ok := 
v.il.Load(idx.uuid) - if ok && idx.date > iidx.date { - v.il.Delete(idx.uuid) - atomic.AddUint64(&v.ic, ^uint64(0)) + _, _ = v.PopDelete(didx.uuid) + _, its, ok := v.loadIVQ(didx.uuid) + if ok && newer(didx.timestamp, its) { + _, _, _ = v.PopInsert(didx.uuid) } select { case <-ctx.Done(): @@ -263,9 +292,9 @@ func (v *vqueue) Range(_ context.Context, f func(uuid string, vector []float32, if idx == nil { return true } - didx, ok := v.dl.Load(uuid) - if !ok || (didx != nil && idx.date > didx.date) { - return f(uuid, idx.vector, idx.date) + dts, ok := v.loadDVQ(uuid) + if !ok || newer(idx.timestamp, dts) { + return f(uuid, idx.vector, idx.timestamp) } return true }) @@ -280,3 +309,25 @@ func (v *vqueue) IVQLen() (l int) { func (v *vqueue) DVQLen() (l int) { return int(atomic.LoadUint64(&v.dc)) } + +func (v *vqueue) loadIVQ(uuid string) (vec []float32, ts int64, ok bool) { + var idx *index + idx, ok = v.il.Load(uuid) + if !ok || idx == nil { + return nil, 0, false + } + return idx.vector, idx.timestamp, true +} + +func (v *vqueue) loadDVQ(uuid string) (ts int64, ok bool) { + var idx *index + idx, ok = v.dl.Load(uuid) + if !ok || idx == nil { + return 0, false + } + return idx.timestamp, true +} + +func newer(ts1, ts2 int64) bool { + return ts1 > ts2 +} diff --git a/pkg/agent/internal/vqueue/queue_test.go b/pkg/agent/internal/vqueue/queue_test.go index 2bf8ea74a5..675956192c 100644 --- a/pkg/agent/internal/vqueue/queue_test.go +++ b/pkg/agent/internal/vqueue/queue_test.go @@ -172,9 +172,9 @@ func TestGetVector(t *testing.T) { // // func Test_vqueue_PushInsert(t *testing.T) { // type args struct { -// uuid string -// vector []float32 -// date int64 +// uuid string +// vector []float32 +// timestamp int64 // } // type fields struct { // il sync.Map[string, *index] @@ -208,7 +208,7 @@ func TestGetVector(t *testing.T) { // args: args { // uuid:"", // vector:nil, -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -235,7 +235,7 @@ func TestGetVector(t *testing.T) { // args: args { // uuid:"", // vector:nil, -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -278,7 +278,7 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// err := v.PushInsert(test.args.uuid, test.args.vector, test.args.date) +// err := v.PushInsert(test.args.uuid, test.args.vector, test.args.timestamp) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -288,8 +288,8 @@ func TestGetVector(t *testing.T) { // // func Test_vqueue_PushDelete(t *testing.T) { // type args struct { -// uuid string -// date int64 +// uuid string +// timestamp int64 // } // type fields struct { // il sync.Map[string, *index] @@ -322,7 +322,7 @@ func TestGetVector(t *testing.T) { // name: "test_case_1", // args: args { // uuid:"", -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -348,7 +348,7 @@ func TestGetVector(t *testing.T) { // name: "test_case_2", // args: args { // uuid:"", -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -391,7 +391,7 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// err := v.PushDelete(test.args.uuid, test.args.date) +// err := v.PushDelete(test.args.uuid, test.args.timestamp) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -399,7 +399,7 @@ func TestGetVector(t *testing.T) { // } // } // -// func Test_vqueue_GetVector(t *testing.T) { +// func Test_vqueue_PopInsert(t *testing.T) { // type args struct { // uuid string // } @@ -410,9 
+410,9 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// wantVec []float32 +// wantVector []float32 // wantTimestamp int64 -// wantExists bool +// wantOk bool // } // type test struct { // name string @@ -423,15 +423,15 @@ func TestGetVector(t *testing.T) { // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, gotExists bool) error { -// if !reflect.DeepEqual(gotVec, w.wantVec) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// defaultCheckFunc := func(w want, gotVector []float32, gotTimestamp int64, gotOk bool) error { +// if !reflect.DeepEqual(gotVector, w.wantVector) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVector, w.wantVector) // } // if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) // } -// if !reflect.DeepEqual(gotExists, w.wantExists) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotExists, w.wantExists) +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -509,15 +509,15 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// gotVec, gotTimestamp, gotExists := v.GetVector(test.args.uuid) -// if err := checkFunc(test.want, gotVec, gotTimestamp, gotExists); err != nil { +// gotVector, gotTimestamp, gotOk := v.PopInsert(test.args.uuid) +// if err := checkFunc(test.want, gotVector, gotTimestamp, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_IVExists(t *testing.T) { +// func Test_vqueue_PopDelete(t *testing.T) { // type args struct { // uuid string // } @@ -528,20 +528,24 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// want bool +// wantTimestamp int64 +// wantOk bool // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, bool) error +// checkFunc func(want, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotTimestamp int64, gotOk bool) error { +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -619,15 +623,15 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// got := v.IVExists(test.args.uuid) -// if err := checkFunc(test.want, got); err != nil { +// gotTimestamp, gotOk := v.PopDelete(test.args.uuid) +// if err := checkFunc(test.want, gotTimestamp, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_DVExists(t *testing.T) { +// func Test_vqueue_GetVector(t *testing.T) { // type args struct { // uuid string // } @@ -638,20 +642,28 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// want bool +// wantVec []float32 +// wantTimestamp int64 +// wantExists bool // } // type test struct { // name string // args 
args // fields fields // want want -// checkFunc func(want, bool) error +// checkFunc func(want, []float32, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, gotExists bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotExists, w.wantExists) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotExists, w.wantExists) // } // return nil // } @@ -729,19 +741,17 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// got := v.DVExists(test.args.uuid) -// if err := checkFunc(test.want, got); err != nil { +// gotVec, gotTimestamp, gotExists := v.GetVector(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTimestamp, gotExists); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_RangePopInsert(t *testing.T) { +// func Test_vqueue_GetVectorWithTimestamp(t *testing.T) { // type args struct { -// ctx context.Context -// now int64 -// f func(uuid string, vector []float32, date int64) bool +// uuid string // } // type fields struct { // il sync.Map[string, *index] @@ -749,17 +759,34 @@ func TestGetVector(t *testing.T) { // ic uint64 // dc uint64 // } -// type want struct{} +// type want struct { +// wantVec []float32 +// wantIts int64 +// wantDts int64 +// wantExists bool +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, []float32, int64, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotVec []float32, gotIts int64, gotDts int64, gotExists bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotIts, w.wantIts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIts, w.wantIts) +// } +// if !reflect.DeepEqual(gotDts, w.wantDts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDts, w.wantDts) +// } +// if !reflect.DeepEqual(gotExists, w.wantExists) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotExists, w.wantExists) +// } // return nil // } // tests := []test{ @@ -768,9 +795,7 @@ func TestGetVector(t *testing.T) { // { // name: "test_case_1", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -795,9 +820,7 @@ func TestGetVector(t *testing.T) { // return test { // name: "test_case_2", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -840,19 +863,18 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// v.RangePopInsert(test.args.ctx, test.args.now, test.args.f) -// if err := checkFunc(test.want); err != nil { +// gotVec, gotIts, gotDts, gotExists := v.GetVectorWithTimestamp(test.args.uuid) +// if err := checkFunc(test.want, 
gotVec, gotIts, gotDts, gotExists); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_RangePopDelete(t *testing.T) { +// func Test_vqueue_getVector(t *testing.T) { // type args struct { -// ctx context.Context -// now int64 -// f func(uuid string) bool +// uuid string +// enableDeleteTimestamp bool // } // type fields struct { // il sync.Map[string, *index] @@ -860,17 +882,34 @@ func TestGetVector(t *testing.T) { // ic uint64 // dc uint64 // } -// type want struct{} +// type want struct { +// wantVec []float32 +// wantIts int64 +// wantDts int64 +// wantOk bool +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, []float32, int64, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotVec []float32, gotIts int64, gotDts int64, gotOk bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotIts, w.wantIts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIts, w.wantIts) +// } +// if !reflect.DeepEqual(gotDts, w.wantDts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDts, w.wantDts) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } // return nil // } // tests := []test{ @@ -879,9 +918,8 @@ func TestGetVector(t *testing.T) { // { // name: "test_case_1", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", +// enableDeleteTimestamp:false, // }, // fields: fields { // il:nil, @@ -906,9 +944,8 @@ func TestGetVector(t *testing.T) { // return test { // name: "test_case_2", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", +// enableDeleteTimestamp:false, // }, // fields: fields { // il:nil, @@ -951,18 +988,17 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// v.RangePopDelete(test.args.ctx, test.args.now, test.args.f) -// if err := checkFunc(test.want); err != nil { +// gotVec, gotIts, gotDts, gotOk := v.getVector(test.args.uuid, test.args.enableDeleteTimestamp) +// if err := checkFunc(test.want, gotVec, gotIts, gotDts, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_Range(t *testing.T) { +// func Test_vqueue_IVExists(t *testing.T) { // type args struct { -// in0 context.Context -// f func(uuid string, vector []float32, ts int64) bool +// uuid string // } // type fields struct { // il sync.Map[string, *index] @@ -970,17 +1006,26 @@ func TestGetVector(t *testing.T) { // ic uint64 // dc uint64 // } -// type want struct{} +// type want struct { +// wantIts int64 +// wantOk bool +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotIts int64, gotOk bool) error { +// if !reflect.DeepEqual(gotIts, w.wantIts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIts, w.wantIts) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } // return nil 
// } // tests := []test{ @@ -989,8 +1034,7 @@ func TestGetVector(t *testing.T) { // { // name: "test_case_1", // args: args { -// in0:nil, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -1015,8 +1059,7 @@ func TestGetVector(t *testing.T) { // return test { // name: "test_case_2", // args: args { -// in0:nil, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -1059,15 +1102,18 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// v.Range(test.args.in0, test.args.f) -// if err := checkFunc(test.want); err != nil { +// gotIts, gotOk := v.IVExists(test.args.uuid) +// if err := checkFunc(test.want, gotIts, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_IVQLen(t *testing.T) { +// func Test_vqueue_DVExists(t *testing.T) { +// type args struct { +// uuid string +// } // type fields struct { // il sync.Map[string, *index] // dl sync.Map[string, *index] @@ -1075,19 +1121,24 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// wantL int +// wantDts int64 +// wantOk bool // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, int) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotL int) error { -// if !reflect.DeepEqual(gotL, w.wantL) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) +// defaultCheckFunc := func(w want, gotDts int64, gotOk bool) error { +// if !reflect.DeepEqual(gotDts, w.wantDts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDts, w.wantDts) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -1096,6 +1147,9 @@ func TestGetVector(t *testing.T) { // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// }, // fields: fields { // il:nil, // dl:nil, @@ -1104,10 +1158,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -1118,6 +1172,9 @@ func TestGetVector(t *testing.T) { // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// }, // fields: fields { // il:nil, // dl:nil, @@ -1126,10 +1183,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -1143,10 +1200,10 @@ func TestGetVector(t *testing.T) { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -1159,36 +1216,37 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// gotL := v.IVQLen() -// if err := checkFunc(test.want, gotL); err != nil { +// gotDts, gotOk := 
v.DVExists(test.args.uuid) +// if err := checkFunc(test.want, gotDts, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_DVQLen(t *testing.T) { +// func Test_vqueue_RangePopInsert(t *testing.T) { +// type args struct { +// ctx context.Context +// now int64 +// f func(uuid string, vector []float32, timestamp int64) bool +// } // type fields struct { // il sync.Map[string, *index] // dl sync.Map[string, *index] // ic uint64 // dc uint64 // } -// type want struct { -// wantL int -// } +// type want struct{} // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, int) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotL int) error { -// if !reflect.DeepEqual(gotL, w.wantL) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -1196,6 +1254,11 @@ func TestGetVector(t *testing.T) { // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, // fields: fields { // il:nil, // dl:nil, @@ -1204,10 +1267,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -1218,6 +1281,11 @@ func TestGetVector(t *testing.T) { // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, // fields: fields { // il:nil, // dl:nil, @@ -1226,10 +1294,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -1243,10 +1311,10 @@ func TestGetVector(t *testing.T) { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -1259,8 +1327,747 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// gotL := v.DVQLen() -// if err := checkFunc(test.want, gotL); err != nil { +// v.RangePopInsert(test.args.ctx, test.args.now, test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_RangePopDelete(t *testing.T) { +// type args struct { +// ctx context.Context +// now int64 +// f func(uuid string) bool +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: 
"test_case_1", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// v.RangePopDelete(test.args.ctx, test.args.now, test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_Range(t *testing.T) { +// type args struct { +// in0 context.Context +// f func(uuid string, vector []float32, ts int64) bool +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// in0:nil, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// in0:nil, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// v.Range(test.args.in0, test.args.f) +// if err := checkFunc(test.want); err != nil { +// 
tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_IVQLen(t *testing.T) { +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantL int +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, int) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, gotL int) error { +// if !reflect.DeepEqual(gotL, w.wantL) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotL := v.IVQLen() +// if err := checkFunc(test.want, gotL); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_DVQLen(t *testing.T) { +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantL int +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, int) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, gotL int) error { +// if !reflect.DeepEqual(gotL, w.wantL) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if 
test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotL := v.DVQLen() +// if err := checkFunc(test.want, gotL); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_loadIVQ(t *testing.T) { +// type args struct { +// uuid string +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantVec []float32 +// wantTs int64 +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, []float32, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotVec []float32, gotTs int64, gotOk bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTs, w.wantTs) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTs, w.wantTs) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotVec, gotTs, gotOk := v.loadIVQ(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTs, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_loadDVQ(t *testing.T) { +// type args struct { +// uuid string +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantTs int64 +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotTs int64, gotOk bool) error 
{ +// if !reflect.DeepEqual(gotTs, w.wantTs) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTs, w.wantTs) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotTs, gotOk := v.loadDVQ(test.args.uuid) +// if err := checkFunc(test.want, gotTs, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_newer(t *testing.T) { +// type args struct { +// ts1 int64 +// ts2 int64 +// } +// type want struct { +// want bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ts1:0, +// ts2:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ts1:0, +// ts2:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := newer(test.args.ts1, test.args.ts2) +// if err := checkFunc(test.want, got); err != nil 
{ // tt.Errorf("error = %v", err) // } // }) diff --git a/pkg/agent/internal/vqueue/stateful_test.go b/pkg/agent/internal/vqueue/stateful_test.go index b1d80ee327..60d1d254bc 100644 --- a/pkg/agent/internal/vqueue/stateful_test.go +++ b/pkg/agent/internal/vqueue/stateful_test.go @@ -388,7 +388,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.IVExists(idA) + _, exists := q.IVExists(idA) return &resultContainer{ exists: exists, } @@ -439,7 +439,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.IVExists(idB) + _, exists := q.IVExists(idB) return &resultContainer{ exists: exists, } @@ -490,7 +490,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.IVExists(idC) + _, exists := q.IVExists(idC) return &resultContainer{ exists: exists, } @@ -541,7 +541,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.DVExists(idA) + _, exists := q.DVExists(idA) return &resultContainer{ exists: exists, } @@ -592,7 +592,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.DVExists(idB) + _, exists := q.DVExists(idB) return &resultContainer{ exists: exists, } @@ -643,7 +643,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.DVExists(idC) + _, exists := q.DVExists(idC) return &resultContainer{ exists: exists, } diff --git a/pkg/gateway/lb/handler/grpc/handler.go b/pkg/gateway/lb/handler/grpc/handler.go index 205316e5c5..7d663e0604 100644 --- a/pkg/gateway/lb/handler/grpc/handler.go +++ b/pkg/gateway/lb/handler/grpc/handler.go @@ -361,55 +361,17 @@ func (s *server) SearchByID( } return nil, err } - vec, err := s.getObject(ctx, uuid) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) if err != nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - if st == nil || st.Code() == codes.NotFound { - err = nil - } - } - if err != nil { - if span != nil { - span.RecordError(err) - span.SetAttributes(attrs...) 
- span.SetStatus(trace.StatusError, err.Error()) - } - return nil, err + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil && st.Code() != codes.NotFound { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) } // try search by using agent's SearchByID method this operation is emergency fallback, the search quality is not same as usual SearchByID operation. res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { @@ -851,55 +813,17 @@ func (s *server) LinearSearchByID( } return nil, err } - vec, err := s.getObject(ctx, uuid) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) if err != nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - if st == nil || st.Code() == codes.NotFound { - err = nil - } - } - if err != nil { - if span != nil { - span.RecordError(err) - span.SetAttributes(attrs...) - span.SetStatus(trace.StatusError, err.Error()) - } - return nil, err + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil && st.Code() != codes.NotFound { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) } // try search by using agent's LinearSearchByID method this operation is emergency fallback, the search quality is not same as usual LinearSearchByID operation. 
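Both search handlers now share the same shape: resolve the stored vector through the public GetObject API, search with it on success, and only fall back to the agents' SearchByID when the lookup fails. A simplified sketch of that flow, assuming the handler package's existing imports (the method name is hypothetical and error handling is trimmed):

// searchByIDFallbackSketch is a hypothetical, simplified rendering of the flow above,
// not the exact handler code.
func (s *server) searchByIDFallbackSketch(ctx context.Context, req *payload.Search_IDRequest) (*payload.Search_Response, error) {
	vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{
		Id: &payload.Object_ID{Id: req.GetId()},
	})
	if err == nil {
		// stored vector found: run the usual vector search with it
		return s.Search(ctx, &payload.Search_Request{
			Vector: vec.GetVector(),
			Config: req.GetConfig(),
		})
	}
	// emergency fallback: ask each agent to SearchByID directly; quality may differ
	return s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, _ *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) {
		return vc.SearchByID(ctx, req, copts...)
	})
}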
res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { @@ -1778,62 +1702,28 @@ func (s *server) Update( } if !req.GetConfig().GetSkipStrictExistCheck() { - vec, err := s.getObject(ctx, uuid) - if err != nil || vec == nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = status.WrapWithNotFound(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s object not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeNotFound(err.Error()) - default: - code := codes.Unknown - if err == nil { - err = errors.ErrObjectIDNotFound(uuid) - code = codes.NotFound - } - st, msg, err = status.ParseError(err, code, vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - } - if span != nil { + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if err != nil { + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil { span.RecordError(err) - span.SetAttributes(attrs...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) 
span.SetStatus(trace.StatusError, err.Error()) } return nil, err } if conv.F32stos(vec.GetVector()) == conv.F32stos(req.GetVector().GetVector()) { - if err == nil { - err = errors.ErrSameVectorAlreadyExists(uuid, vec.GetVector(), req.GetVector().GetVector()) + if vec.GetTimestamp() < req.GetVector().GetTimestamp() { + return s.UpdateTimestamp(ctx, &payload.Update_TimestampRequest{ + Id: uuid, + Timestamp: req.GetVector().GetTimestamp(), + }) } + err = errors.ErrSameVectorAlreadyExists(uuid, vec.GetVector(), req.GetVector().GetVector()) st, msg, err := status.ParseError(err, codes.AlreadyExists, "error "+vald.UpdateRPCName+" API ID = "+uuid+"'s same vector data already exists", &errdetails.RequestInfo{ @@ -1858,7 +1748,7 @@ func (s *server) Update( } } var now int64 - if req.GetConfig().GetTimestamp() != 0 { + if req.GetConfig().GetTimestamp() >= 0 { now = req.GetConfig().GetTimestamp() } else { now = time.Now().UnixNano() @@ -2121,6 +2011,221 @@ func (s *server) MultiUpdate( return locs, errs } +func (s *server) UpdateTimestamp( + ctx context.Context, req *payload.Update_TimestampRequest, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + uuid := req.GetId() + reqInfo := &errdetails.RequestInfo{ + RequestId: uuid, + ServingData: errdetails.Serialize(req), + } + resInfo := &errdetails.ResourceInfo{ + ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateTimestampRPCName + "." + vald.GetObjectRPCName, + ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), + } + if len(uuid) == 0 { + err = errors.ErrInvalidMetaDataConfig + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid uuid", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "invalid id", + Description: err.Error(), + }, + }, + }) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + ts := req.GetTimestamp() + if ts < 0 { + err = errors.ErrInvalidTimestamp(ts) + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid vector argument", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "timestamp", + Description: err.Error(), + }, + }, + }, info.Get()) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + var ( + mu sync.RWMutex + aeCount atomic.Uint64 + updated atomic.Uint64 + ls = make([]string, 0, s.replica) + visited = make(map[string]bool, s.replica) + locs = &payload.Object_Location{ + Uuid: uuid, + Ips: make([]string, 0, s.replica), + } + ) + err = s.gateway.BroadCast(ctx, service.WRITE, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "BroadCast/"+target), apiName+"/"+vald.UpdateTimestampRPCName+"/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.UpdateTimestamp(ctx, req, copts...) 
+ if err != nil { + st, ok := status.FromError(err) + if ok && st != nil { + if st.Code() != codes.AlreadyExists && + st.Code() != codes.Canceled && + st.Code() != codes.DeadlineExceeded && + st.Code() != codes.InvalidArgument && + st.Code() != codes.NotFound && + st.Code() != codes.OK && + st.Code() != codes.Unimplemented { + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), fmt.Sprintf("UpdateTimestamp operation for Agent %s failed,\terror: %v", target, err))...) + span.SetStatus(trace.StatusError, err.Error()) + } + return err + } + if st.Code() == codes.AlreadyExists { + host, _, err := net.SplitHostPort(target) + if err != nil { + host = target + } + aeCount.Add(1) + mu.Lock() + visited[target] = true + locs.Ips = append(locs.GetIps(), host) + ls = append(ls, host) + mu.Unlock() + + } + } + return nil + } + if loc != nil { + updated.Add(1) + mu.Lock() + visited[target] = true + locs.Ips = append(locs.GetIps(), loc.GetIps()...) + ls = append(ls, loc.GetName()) + mu.Unlock() + } + return nil + }) + switch { + case err != nil: + st, msg, err := status.ParseError(err, codes.Internal, + "failed to parse "+vald.UpdateTimestampRPCName+" gRPC error response", reqInfo, resInfo, info.Get()) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + case len(locs.Ips) <= 0: + err = errors.ErrIndexNotFound + err = status.WrapWithNotFound(vald.UpdateTimestampRPCName+" API update target not found", err, reqInfo, resInfo) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeNotFound(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + case updated.Load()+aeCount.Load() < uint64(s.replica): + shortage := s.replica - int(updated.Load()+aeCount.Load()) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if err != nil { + st, msg, err := status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpdateTimestampRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + + err = s.gateway.DoMulti(ctx, shortage, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + mu.RLock() + tf, ok := visited[target] + mu.RUnlock() + if tf && ok { + return errors.Errorf("target: %s already inserted will skip", target) + } + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "DoMulti/"+target), apiName+"/"+vald.InsertRPCName+"/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.Insert(ctx, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Timestamp: ts, + }, + }, copts...) + if err != nil { + st, ok := status.FromError(err) + if ok && st != nil && span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), fmt.Sprintf("Shortage index Insert for Update operation for Agent %s failed,\terror: %v", target, err))...) + span.SetStatus(trace.StatusError, err.Error()) + } + return err + } + if loc != nil { + updated.Add(1) + mu.Lock() + locs.Ips = append(locs.GetIps(), loc.GetIps()...) 
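After the broadcast and the shortage backfill shown here, the handler (continued below) sorts the collected agent names and returns the aggregated Object_Location. From the client side, the new RPC added in apis/proto/v1/vald/update.proto can be exercised roughly as follows; the target address and id are placeholders, and the snippet assumes the regenerated Update client in apis/grpc/v1/vald:

// Hypothetical client-side usage sketch of the new UpdateTimestamp RPC.
package main

import (
	"context"
	"log"
	"time"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// placeholder address: point this at your lb-gateway
	conn, err := grpc.NewClient("localhost:8081", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	loc, err := vald.NewUpdateClient(conn).UpdateTimestamp(context.Background(), &payload.Update_TimestampRequest{
		Id:        "uuid-1",              // placeholder id
		Timestamp: time.Now().UnixNano(), // new timestamp to store
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("timestamp updated on %s (%v)", loc.GetName(), loc.GetIps())
}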
+ ls = append(ls, loc.GetName()) + mu.Unlock() + } + return nil + }) + if err != nil { + st, msg, err := status.ParseError(err, codes.Unknown, vald.InsertRPCName+" API for "+vald.UpdateTimestampRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if span != nil && st != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + case updated.Load() == 0 && aeCount.Load() > 0: + err = status.WrapWithAlreadyExists(vald.UpdateTimestampRPCName+" API update target same vector already exists", errors.ErrSameVectorAlreadyExists(uuid, nil, nil), reqInfo, resInfo) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeAlreadyExists(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + + } + slices.Sort(ls) + locs.Name = strings.Join(ls, ",") + return locs, nil +} + func (s *server) Upsert( ctx context.Context, req *payload.Upsert_Request, ) (loc *payload.Object_Location, err error) { @@ -2180,48 +2285,23 @@ func (s *server) Upsert( } var shouldInsert bool if !req.GetConfig().GetSkipStrictExistCheck() { - vec, err := s.getObject(ctx, uuid) - var ( - attrs trace.Attributes - st *status.Status - msg string - ) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + var attrs trace.Attributes if err != nil || vec == nil { - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - shouldInsert = true - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + var ( + st *status.Status + msg string + ) + st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) + if st != nil { attrs = trace.FromGRPCStatus(st.Code(), msg) - if st != nil && st.Code() == codes.NotFound { - err = nil + if st.Code() == codes.NotFound { shouldInsert = true + err = nil } } } else if conv.F32stos(vec.GetVector()) == conv.F32stos(req.GetVector().GetVector()) { @@ -2236,7 +2316,6 @@ func (s *server) Upsert( } return nil, err } - } else { id, err := 
s.exists(ctx, uuid) if err != nil { diff --git a/pkg/gateway/lb/handler/grpc/handler_test.go b/pkg/gateway/lb/handler/grpc/handler_test.go index cd8ec34a5b..252af51abb 100644 --- a/pkg/gateway/lb/handler/grpc/handler_test.go +++ b/pkg/gateway/lb/handler/grpc/handler_test.go @@ -3168,6 +3168,143 @@ package grpc // } // } // +// func Test_server_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// req *payload.Update_TimestampRequest +// } +// type fields struct { +// eg errgroup.Group +// gateway service.Gateway +// timeout time.Duration +// replica int +// streamConcurrency int +// multiConcurrency int +// name string +// ip string +// UnimplementedValdServer vald.UnimplementedValdServer +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// s := &server{ +// eg: test.fields.eg, +// gateway: test.fields.gateway, +// timeout: test.fields.timeout, +// replica: test.fields.replica, +// streamConcurrency: test.fields.streamConcurrency, +// multiConcurrency: test.fields.multiConcurrency, +// name: test.fields.name, +// ip: test.fields.ip, +// UnimplementedValdServer: test.fields.UnimplementedValdServer, +// } +// +// gotRes, err := s.UpdateTimestamp(test.args.ctx, test.args.req) +// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_server_Upsert(t *testing.T) { // type args struct { 
// ctx context.Context @@ -5729,3 +5866,140 @@ package grpc // }) // } // } +// +// func Test_server_IndexProperty(t *testing.T) { +// type args struct { +// ctx context.Context +// in1 *payload.Empty +// } +// type fields struct { +// eg errgroup.Group +// gateway service.Gateway +// timeout time.Duration +// replica int +// streamConcurrency int +// multiConcurrency int +// name string +// ip string +// UnimplementedValdServer vald.UnimplementedValdServer +// } +// type want struct { +// wantDetail *payload.Info_Index_PropertyDetail +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_PropertyDetail, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotDetail *payload.Info_Index_PropertyDetail, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotDetail, w.wantDetail) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDetail, w.wantDetail) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in1:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in1:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// s := &server{ +// eg: test.fields.eg, +// gateway: test.fields.gateway, +// timeout: test.fields.timeout, +// replica: test.fields.replica, +// streamConcurrency: test.fields.streamConcurrency, +// multiConcurrency: test.fields.multiConcurrency, +// name: test.fields.name, +// ip: test.fields.ip, +// UnimplementedValdServer: test.fields.UnimplementedValdServer, +// } +// +// gotDetail, err := s.IndexProperty(test.args.ctx, test.args.in1) +// if err := checkFunc(test.want, gotDetail, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/gateway/lb/service/gateway.go b/pkg/gateway/lb/service/gateway.go index 53c259c9e1..2d806b20fc 100644 --- a/pkg/gateway/lb/service/gateway.go +++ b/pkg/gateway/lb/service/gateway.go @@ -24,6 +24,7 @@ import ( "github.com/vdaas/vald/apis/grpc/v1/vald" 
"github.com/vdaas/vald/internal/client/v1/client/discoverer" + vc "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability/trace" @@ -94,7 +95,7 @@ func (g *gateway) BroadCast( case <-ictx.Done(): return nil default: - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) if err != nil { return err } @@ -129,7 +130,7 @@ func (g *gateway) DoMulti( copts ...grpc.CallOption, ) (err error) { if atomic.LoadUint32(&cur) < limit { - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) if err != nil { return err } @@ -147,7 +148,7 @@ func (g *gateway) DoMulti( if atomic.LoadUint32(&cur) < limit { _, ok := visited.Load(addr) if !ok { - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) if err != nil { return err } diff --git a/pkg/gateway/mirror/handler/grpc/handler.go b/pkg/gateway/mirror/handler/grpc/handler.go index 45226b4806..7065b1e177 100644 --- a/pkg/gateway/mirror/handler/grpc/handler.go +++ b/pkg/gateway/mirror/handler/grpc/handler.go @@ -1173,7 +1173,7 @@ func (s *server) handleInsert( } // In this case, the status code in the result object contains only OK or ALREADY_EXIST. - // And send Update API requst to ALREADY_EXIST cluster using the query requested by the user. + // And send Update API request to ALREADY_EXIST cluster using the query requested by the user. log.Warnf("failed to "+vald.InsertRPCName+" API: %#v", err) resLoc, err := s.handleInsertResult(ctx, alreadyExistsTgts, &payload.Update_Request{ @@ -1743,7 +1743,7 @@ func (s *server) handleUpdate( } // In this case, the status code in the result object contains only OK or ALREADY_EXIST or NOT_FOUND. - // And send Insert API requst to NOT_FOUND cluster using query requested by the user. + // And send Insert API request to NOT_FOUND cluster using query requested by the user. log.Warnf("failed to "+vald.UpdateRPCName+" API: %#v", err) resLoc, err := s.handleUpdateResult(ctx, notFoundTgts, &payload.Insert_Request{ @@ -2382,7 +2382,7 @@ func (s *server) doUpsert( return loc, nil } -// StreamUpsert handles bidirectional streaming for upserting objects. +// StreamUpsert handles bidirectional streaming for upsert objects. // It wraps the bidirectional stream logic for the Upsert RPC method. // For each incoming request in the bidirectional stream, it calls the Upsert function. // The response is then sent back through the stream with the corresponding status or location information. 
@@ -3348,7 +3348,7 @@ func (s *server) doStreamListObject( err = status.WrapWithCanceled("Stream Recv returned canceld error at "+id, err) attr = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded("Stream Recv returned deadlin exceeded error at "+id, err) + err = status.WrapWithDeadlineExceeded("Stream Recv returned deadline exceeded error at "+id, err) attr = trace.StatusCodeDeadlineExceeded(err.Error()) default: var ( @@ -3387,7 +3387,7 @@ func (s *server) doStreamListObject( err = status.WrapWithCanceled("Stream Send returned canceld error at "+id, err) attr = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded("Stream Send returned deadlin exceeded error at "+id, err) + err = status.WrapWithDeadlineExceeded("Stream Send returned deadline exceeded error at "+id, err) attr = trace.StatusCodeDeadlineExceeded(err.Error()) default: var ( diff --git a/pkg/gateway/mirror/service/mirror.go b/pkg/gateway/mirror/service/mirror.go index b5ab31609b..5a2e68c920 100644 --- a/pkg/gateway/mirror/service/mirror.go +++ b/pkg/gateway/mirror/service/mirror.go @@ -64,10 +64,10 @@ func NewMirrorClient(conn *grpc.ClientConn) MirrorClient { } type mirr struct { - addrl sync.Map[string, any] // List of all connected addresses + addrs sync.Map[string, any] // List of all connected addresses selfMirrTgts []*payload.Mirror_Target // Targets of self mirror gateway - selfMirrAddrl sync.Map[string, any] // List of self Mirror gateway addresses - gwAddrl sync.Map[string, any] // List of Vald gateway (LB gateway) addresses + selfMirrAddrs sync.Map[string, any] // List of self Mirror gateway addresses + gwAddrs sync.Map[string, any] // List of Vald gateway (LB gateway) addresses eg errgroup.Group registerDur time.Duration gateway Gateway @@ -90,7 +90,7 @@ func NewMirror(opts ...MirrorOption) (_ Mirror, err error) { } m.selfMirrTgts = make([]*payload.Mirror_Target, 0) - m.selfMirrAddrl.Range(func(addr string, _ any) bool { + m.selfMirrAddrs.Range(func(addr string, _ any) bool { var ( host string port uint16 @@ -317,15 +317,15 @@ func (m *mirr) Connect(ctx context.Context, targets ...*payload.Mirror_Target) e for _, target := range targets { addr := net.JoinHostPort(target.GetHost(), uint16(target.GetPort())) // addr: host:port if !m.isSelfMirrorAddr(addr) && !m.isGatewayAddr(addr) { - _, ok := m.addrl.Load(addr) + _, ok := m.addrs.Load(addr) if !ok || !m.IsConnected(ctx, addr) { _, err := m.gateway.GRPCClient().Connect(ctx, addr) if err != nil { - m.addrl.Delete(addr) + m.addrs.Delete(addr) return err } } - m.addrl.Store(addr, struct{}{}) + m.addrs.Store(addr, struct{}{}) } } return nil @@ -345,13 +345,13 @@ func (m *mirr) Disconnect(ctx context.Context, targets ...*payload.Mirror_Target for _, target := range targets { addr := net.JoinHostPort(target.GetHost(), uint16(target.GetPort())) if !m.isGatewayAddr(addr) { - _, ok := m.addrl.Load(addr) + _, ok := m.addrs.Load(addr) if ok || m.IsConnected(ctx, addr) { if err := m.gateway.GRPCClient().Disconnect(ctx, addr); err != nil && !errors.Is(err, errors.ErrGRPCClientConnNotFound(addr)) { return err } - m.addrl.Delete(addr) + m.addrs.Delete(addr) } } } @@ -366,7 +366,7 @@ func (m *mirr) IsConnected(ctx context.Context, addr string) bool { // MirrorTargets returns the Mirror targets, including the address of this gateway and the addresses of other Mirror gateways // to which this gateway is currently connected. 
func (m *mirr) MirrorTargets(ctx context.Context) (tgts []*payload.Mirror_Target, err error) { - tgts = make([]*payload.Mirror_Target, 0, m.addrl.Len()) + tgts = make([]*payload.Mirror_Target, 0, m.addrs.Len()) m.RangeMirrorAddr(func(addr string, _ any) bool { if m.IsConnected(ctx, addr) { var ( @@ -391,12 +391,12 @@ func (m *mirr) MirrorTargets(ctx context.Context) (tgts []*payload.Mirror_Target } func (m *mirr) isSelfMirrorAddr(addr string) bool { - _, ok := m.selfMirrAddrl.Load(addr) + _, ok := m.selfMirrAddrs.Load(addr) return ok } func (m *mirr) isGatewayAddr(addr string) bool { - _, ok := m.gwAddrl.Load(addr) + _, ok := m.gwAddrs.Load(addr) return ok } @@ -413,7 +413,7 @@ func (m *mirr) connectedOtherMirrorAddrs(ctx context.Context) (addrs []string) { // RangeMirrorAddr calls f sequentially for each key and value present in the connection map. If f returns false, range stops the iteration. func (m *mirr) RangeMirrorAddr(f func(addr string, _ any) bool) { - m.addrl.Range(func(addr string, value any) bool { + m.addrs.Range(func(addr string, value any) bool { if !m.isGatewayAddr(addr) && !m.isSelfMirrorAddr(addr) { if !f(addr, value) { return false diff --git a/pkg/gateway/mirror/service/mirror_option.go b/pkg/gateway/mirror/service/mirror_option.go index 1b7243c382..ed605911eb 100644 --- a/pkg/gateway/mirror/service/mirror_option.go +++ b/pkg/gateway/mirror/service/mirror_option.go @@ -44,7 +44,7 @@ func WithGatewayAddrs(addrs ...string) MirrorOption { return errors.NewErrCriticalOption("lbAddrs", addrs) } for _, addr := range addrs { - m.gwAddrl.Store(addr, struct{}{}) + m.gwAddrs.Store(addr, struct{}{}) } return nil } @@ -57,7 +57,7 @@ func WithSelfMirrorAddrs(addrs ...string) MirrorOption { return errors.NewErrCriticalOption("selfMirrorAddrs", addrs) } for _, addr := range addrs { - m.selfMirrAddrl.Store(addr, struct{}{}) + m.selfMirrAddrs.Store(addr, struct{}{}) } return nil } diff --git a/pkg/gateway/mirror/service/mirror_test.go b/pkg/gateway/mirror/service/mirror_test.go index d23f204fcd..edf41c8191 100644 --- a/pkg/gateway/mirror/service/mirror_test.go +++ b/pkg/gateway/mirror/service/mirror_test.go @@ -734,10 +734,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx context.Context // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -769,10 +769,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -797,10 +797,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -834,10 +834,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, 
+// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -857,10 +857,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts *payload.Mirror_Targets // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -897,10 +897,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -926,10 +926,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -963,10 +963,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -986,10 +986,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1022,10 +1022,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1051,10 +1051,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1088,10 +1088,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1110,10 +1110,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] 
+// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1145,10 +1145,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1173,10 +1173,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1210,10 +1210,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1232,10 +1232,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1267,10 +1267,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1295,10 +1295,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1332,10 +1332,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1354,10 +1354,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f func(addr string, _ any) bool // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1384,10 +1384,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1412,10 +1412,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t 
*testing.T) { // f:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1449,10 +1449,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, diff --git a/pkg/gateway/mirror/usecase/vald.go b/pkg/gateway/mirror/usecase/vald.go index 453c7344c4..12976fda0e 100644 --- a/pkg/gateway/mirror/usecase/vald.go +++ b/pkg/gateway/mirror/usecase/vald.go @@ -23,9 +23,9 @@ import ( "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability" - bometrics "github.com/vdaas/vald/internal/observability/metrics/backoff" + backoffmetrics "github.com/vdaas/vald/internal/observability/metrics/backoff" cbmetrics "github.com/vdaas/vald/internal/observability/metrics/circuitbreaker" - mirrmetrics "github.com/vdaas/vald/internal/observability/metrics/gateway/mirror" + mirrormetrics "github.com/vdaas/vald/internal/observability/metrics/gateway/mirror" "github.com/vdaas/vald/internal/runner" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/servers/server" @@ -135,9 +135,9 @@ func New(cfg *config.Data) (r runner.Runner, err error) { if cfg.Observability.Enabled { obs, err = observability.NewWithConfig( cfg.Observability, - bometrics.New(), + backoffmetrics.New(), cbmetrics.New(), - mirrmetrics.New(m), + mirrormetrics.New(m), ) if err != nil { return nil, err diff --git a/pkg/index/job/correction/service/corrector.go b/pkg/index/job/correction/service/corrector.go index 41a71b2274..5bcf7a6e96 100644 --- a/pkg/index/job/correction/service/corrector.go +++ b/pkg/index/job/correction/service/corrector.go @@ -87,7 +87,7 @@ func New(opts ...Option) (_ Corrector, err error) { log.Errorf("failed to create dir %s", dir) return nil, err } - path := file.Join(dir, "checkedid.db") + path := file.Join(dir, "checked_id.db") db, err := pogreb.New(pogreb.WithPath(path), pogreb.WithBackgroundCompactionInterval(c.backgroundCompactionInterval), pogreb.WithBackgroundSyncInterval(c.backgroundSyncInterval)) @@ -136,15 +136,22 @@ func (c *correct) Start(ctx context.Context) (err error) { return err } counts := detail.GetCounts() - agents := make([]string, 0, detail.GetLiveAgents()) - for agent, count := range counts { - log.Infof("index info: addr(%s), stored(%d), uncommitted(%d), indexing=%t, saving=%t", agent, count.GetStored(), count.GetUncommitted(), count.GetIndexing(), count.GetSaving()) + agents := make([]string, 0, len(counts)) + for agent := range counts { agents = append(agents, agent) } slices.SortFunc(agents, func(left, right string) int { - return cmp.Compare(counts[left].GetStored(), counts[right].GetStored()) + return cmp.Compare(counts[right].GetStored(), counts[left].GetStored()) }) + for _, agent := range agents { + count, ok := counts[agent] + if ok && count != nil { + log.Infof("index info: addr(%s), stored(%d), uncommitted(%d), indexing=%t, saving=%t", agent, count.GetStored(), count.GetUncommitted(), count.GetIndexing(), count.GetSaving()) + } + } + 
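+	// NOTE: agents is sorted in descending order of stored index count (see the SortFunc above),
+	// so the index info logged above is emitted from the most to the least populated agent.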
log.Infof("sorted agents: %v,\tdiscovered agents: %v", agents, c.discoverer.GetAddrs(ctx)) + errs := make([]error, 0, len(agents)) emptyReq := new(payload.Object_List_Request) @@ -177,25 +184,27 @@ func (c *correct) Start(ctx context.Context) (err error) { uncommitted uint32 indexing bool saving bool + debugMsg string ) count, ok := counts[addr] if ok && count != nil { stored = count.GetStored() uncommitted = count.GetUncommitted() + indexing = count.GetIndexing() + saving = count.GetSaving() + debugMsg = fmt.Sprintf("agent %s (total index detail = stored: %d, uncommitted: %d, indexing=%t, saving=%t), stream concurrency: %d, processing %d/%d, replicas: size(%d) = addrs%v", addr, stored, uncommitted, indexing, saving, c.streamListConcurrency, corrected, len(agents), len(replicas), replicas) if stored+uncommitted == 0 { // id no indices in agent skip process + log.Warnf("skipping index correction process due to zero index detected for %s", debugMsg) return nil } - indexing = count.GetIndexing() - saving = count.GetSaving() } - debugMsg := fmt.Sprintf("agent %s (stored: %d, uncommitted: %d, indexing=%t, saving=%t), stream concurrency: %d, processing %d/%d, replicas: size(%d) = addrs%v", addr, stored, uncommitted, indexing, saving, c.streamListConcurrency, corrected, len(agents), len(replicas), replicas) eg, egctx := errgroup.WithContext(ctx) eg.SetLimit(c.streamListConcurrency) ctx, cancel := context.WithCancelCause(egctx) - stream, err := vald.NewObjectClient(conn).StreamListObject(ctx, emptyReq, copts...) - if err != nil { + stream, err := vc.NewValdClient(conn).StreamListObject(ctx, emptyReq, copts...) + if err != nil || stream == nil { return err } log.Infof("starting correction for %s", debugMsg) @@ -207,15 +216,15 @@ func (c *correct) Start(ctx context.Context) (err error) { if !errors.Is(ctx.Err(), context.Canceled) { log.Errorf("context done unexpectedly: %v for %s", ctx.Err(), debugMsg) } - if context.Cause(ctx) != io.EOF { + if !errors.Is(context.Cause(ctx), io.EOF) { log.Errorf("context canceled due to %v for %s", ctx.Err(), debugMsg) } err = eg.Wait() if err != nil { - log.Errorf("errgroup returned error: %v for %s", ctx.Err(), debugMsg) - return err + log.Errorf("correction returned error status errgroup returned error: %v for %s", ctx.Err(), debugMsg) + } else { + log.Infof("correction finished for %s", debugMsg) } - log.Infof("correction finished for %s", debugMsg) return nil default: res, err := stream.Recv() @@ -225,27 +234,12 @@ func (c *correct) Start(ctx context.Context) (err error) { } else { cancel(errors.ErrStreamListObjectStreamFinishedUnexpectedly(err)) } - } else { + } else if res != nil && res.GetVector() != nil && res.GetVector().GetId() != "" && res.GetVector().GetTimestamp() < start.UnixNano() { eg.Go(safety.RecoverFunc(func() (err error) { vec := res.GetVector() - if vec == nil || vec.GetId() == "" { - st := res.GetStatus() - if st != nil { - log.Errorf("invalid vector id: %s detected and returned status code: %d, message: %s, details: %v, debug: %s", vec.GetId(), st.GetCode(), st.GetMessage(), st.GetDetails(), debugMsg) - } - return errors.ErrFailedToReceiveVectorFromStream - } - - // skip if the vector is inserted after correction start - if vec.GetTimestamp() > start.UnixNano() { - log.Debugf("index correction process for ID: %s skipped due to newer timestamp detected. 
job started at %s but object timestamp is %s", - vec.GetId(), - start.Format(time.RFC3339Nano), - time.Unix(0, vec.GetTimestamp()).Format(time.RFC3339Nano)) - return nil - } - + ts := vec.GetTimestamp() id := vec.GetId() + _, ok, err := c.checkedList.Get(id) if err != nil { log.Errorf("failed to perform Get from check list but still try to finish processing without cache: %v", err) @@ -264,418 +258,35 @@ func (c *correct) Start(ctx context.Context) (err error) { // Therefore, the process is only to correct the missing replicas. if len(replicas) <= 0 { diff := c.indexReplica - 1 - addrs := c.discoverer.GetAddrs(egctx) // correct index replica shortage if diff > 0 { - log.Infof("replica shortage(diff=%d) of vector id: %s detected from last %s. inserting to other agents = %v", diff, id, debugMsg, addrs) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToInsert - } - req := &payload.Insert_Request{ - Vector: vec, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - Timestamp: vec.GetTimestamp(), - }, - } - for _, daddr := range addrs { - if diff > 0 && daddr != addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - _, err := client.Insert(ctx, req, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < vec.GetTimestamp() { - _, err := client.Update(ctx, &payload.Update_Request{ - Vector: vec, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: vec.GetTimestamp() - 1, - }, - }, copts...) 
- if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to insert object to agent(%s): %w", daddr, err)) - } - } - } + return c.correctShortage(egctx, id, addr, debugMsg, vec, make(map[string]*payload.Object_Timestamp), diff) } return nil } - var ( - latest int64 - mu sync.Mutex - found = make(map[string]*payload.Object_Timestamp, len(addr)) - latestAgent = addr - ) // load index replica from other agents and store it to found map - if err := c.discoverer.GetClient().OrderedRangeConcurrent(egctx, replicas, len(replicas), - func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) error { - ots, err := vald.NewObjectClient(conn).GetTimestamp(ctx, &payload.Object_TimestampRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call GetTimestamp to agent: %s, id: %s returned not a gRPC status error: %v", addr, id, err) - return err - } else if st.Code() == codes.NotFound { - // when replica of agent > index replica, this happens - return nil - } else if st.Code() == codes.Canceled { - return nil - } else { - log.Errorf("failed to GetTimestamp with unexpected error. agent: %s, id: %s, code: %v, message: %s", addr, id, st.Code(), st.Message()) - return err - } - } - - // skip if the vector is inserted after correction start - if ots.GetTimestamp() > start.UnixNano() { - log.Debugf("timestamp of vector(id: %s, timestamp: %v) is newer than correction start time(%v). skipping...", - ots.GetId(), - ots.GetTimestamp(), - start.UnixNano(), - ) - return nil - } - mu.Lock() - found[addr] = ots - if latest < ots.GetTimestamp() { - latest = ots.GetTimestamp() - if latest > vec.GetTimestamp() { - latestAgent = addr - } - } - mu.Unlock() - return nil - }, - ); err != nil { + found, skipped, latest, latestAgent, err := c.loadReplicaInfo(egctx, addr, id, replicas, counts, ts, start) + if err != nil { return err } - latestObject := vec - - // current object timestamp is not latest get latest object from other agent index replica - if vec.GetTimestamp() < latest && latestAgent != addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.ObjectRPCServiceName+"/"+vald.GetObjectRPCName), latestAgent, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - obj, err := vald.NewObjectClient(conn).GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) 
- if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() >= latest && obj.GetId() != "" && obj.GetVector() != nil { - latestObject = obj + if len(found) != 0 && ((len(replicas) > 0 && len(skipped) == 0) || (len(skipped) > 0 && len(skipped) < len(replicas))) { + // current object timestamp is not latest get latest object from other agent index replica + if ts < latest && latestAgent != addr { + latestObject := c.getLatestObject(egctx, id, addr, latestAgent, latest) + if latestObject != nil && latestObject.GetVector() != nil && latestObject.GetId() != "" && latestObject.GetTimestamp() >= latest { + vec = latestObject } - return obj, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to load latest object id: %s, agent: %s, timestamp: %d, error: %w", id, addr, latest, err)) } + c.correctTimestamp(ctx, id, vec, found) + } else if len(skipped) > 0 { + log.Debugf("timestamp correction for index id %s skipped, replica %s, skipped agents: %v", id, addr, skipped) } - if latestObject.Timestamp < latest { - latestObject.Timestamp = latest - } - tss := time.Unix(0, latestObject.GetTimestamp()).Format(time.RFC3339Nano) // timestamp string - for addr, ots := range found { // correct timestamp inconsistency - if latestObject.GetTimestamp() > ots.GetTimestamp() { - log.Infof("timestamp inconsistency detected with vector(id: %s, timestamp: %s). updating with the latest vector(id: %s, timestamp: %s)", - ots.GetId(), - time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), - latestObject.GetId(), - tss, - ) - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateRPCName), addr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - // TODO: use UpdateTimestamp when it's implemented because here we just want to update only the timestamp but not the vector - _, err := client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - _, err = client.Insert(ctx, &payload.Insert_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp(), - }, - }, copts...) 
- if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < latestObject.GetTimestamp() { - _, err = client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - } - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - log.Infof("vector successfully updated. address: %s, uuid: %s, timestamp: %s", addr, latestObject.GetId(), tss) - c.correctedOldIndexCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to fix timestamp to %s for id %s agent %s error: %w", tss, id, addr, err)) - } - } - } - currentNumberOfIndexReplica := len(found) + 1 - diff := c.indexReplica - currentNumberOfIndexReplica - addrs := c.discoverer.GetAddrs(egctx) + diff := c.indexReplica - (len(found) + 1) if diff > 0 { // correct index replica shortage - log.Infof("replica shortage(diff=%d) of vector id: %s detected for %s. inserting to other agents = %v", diff, id, debugMsg, addrs) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToInsert - } - req := &payload.Insert_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - Timestamp: latestObject.GetTimestamp(), - }, - } - for _, daddr := range addrs { - if diff > 0 && daddr != addr { - _, ok := found[daddr] - if !ok { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - _, err := client.Insert(ctx, req, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) 
- if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < latestObject.GetTimestamp() { - _, err = client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to insert object to agent(%s): %w", daddr, err)) - } - } - } - } + return c.correctShortage(egctx, id, addr, debugMsg, vec, found, diff) } else if diff < 0 { // correct index replica oversupply - log.Infof("replica oversupply of vector %s. deleting...", id) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToRemove - } - req := &payload.Remove_Request{ - Id: &payload.Object_ID{ - Id: id, - }, - } - for _, daddr := range addrs { - if diff < 0 { - _, ok := found[daddr] - if ok || daddr == addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.RemoveRPCServiceName+"/"+vald.RemoveRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - _, err := vald.NewRemoveClient(conn).Remove(ctx, req, copts...) 
- if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - diff++ - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff++ - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to delete object from agent(%s): %w", daddr, err)) - } - } - } - } + return c.correctOversupply(egctx, id, addr, debugMsg, found, diff) } return nil })) @@ -710,3 +321,320 @@ func (c *correct) NumberOfCorrectedOldIndex() uint64 { func (c *correct) NumberOfCorrectedReplication() uint64 { return c.correctedReplicationCount.Load() } + +func (c *correct) loadReplicaInfo( + ctx context.Context, + originAddr, id string, + replicas []string, + counts map[string]*payload.Info_Index_Count, + ts int64, + start time.Time, +) ( + found map[string]*payload.Object_Timestamp, + skipped []string, + latest int64, + latestAgent string, + err error, +) { + var mu sync.Mutex + latestAgent = originAddr + skipped = make([]string, 0, len(replicas)) + found = make(map[string]*payload.Object_Timestamp, c.indexReplica-1) + tss := time.Unix(0, start.UnixNano()).Format(time.RFC3339Nano) + err = c.discoverer.GetClient().OrderedRangeConcurrent(ctx, replicas, len(replicas), + func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) error { + if originAddr == addr { + return nil + } + count, ok := counts[addr] // counts is read-only we don't need to lock. + if ok && count != nil && count.GetStored() == 0 && count.GetUncommitted() == 0 { + mu.Lock() + skipped = append(skipped, addr) + mu.Unlock() + return nil + } + + ots, err := vc.NewValdClient(conn).GetTimestamp(ctx, &payload.Object_TimestampRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call GetTimestamp to agent: %s, id: %s returned not a gRPC status error: %v", addr, id, err) + return err + } else if st.Code() == codes.NotFound { + // when replica of agent > index replica, this happens + return nil + } else if st.Code() == codes.Canceled { + return nil + } else { + log.Errorf("failed to GetTimestamp with unexpected error. agent: %s, id: %s, code: %v, message: %s", addr, id, st.Code(), st.Message()) + return err + } + } + + if ots == nil { + // not found + return nil + } + + // skip if the vector is inserted after correction start + if ots.GetTimestamp() > start.UnixNano() { + log.Debugf("timestamp of vector(id: %s, timestamp: %s) is newer than correction start time(%s). 
skipping...", + ots.GetId(), + time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), + tss, + ) + return nil + } + mu.Lock() + found[addr] = ots + if latest < ots.GetTimestamp() { + latest = ots.GetTimestamp() + if latest > ts { + latestAgent = addr + } + } + mu.Unlock() + return nil + }, + ) + return +} + +func (c *correct) getLatestObject( + ctx context.Context, id, addr, latestAgent string, latest int64, +) (latestObject *payload.Object_Vector) { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.ObjectRPCServiceName+"/"+vald.GetObjectRPCName), latestAgent, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + obj, err := vc.NewValdClient(conn).GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + if obj == nil { + // not found + return nil, nil + } + if obj.GetTimestamp() >= latest && obj.GetId() != "" && obj.GetVector() != nil { + latestObject = obj + } + return obj, nil + }) + if err != nil { + log.Errorf("failed to load latest object id: %s, agent: %s, timestamp: %d, error: %v", id, addr, latest, err) + } + if latestObject != nil && latestObject.GetTimestamp() < latest { + latestObject.Timestamp = latest + } + return latestObject +} + +func (c *correct) correctTimestamp( + ctx context.Context, + id string, + latestObject *payload.Object_Vector, + found map[string]*payload.Object_Timestamp, +) { + tss := time.Unix(0, latestObject.GetTimestamp()).Format(time.RFC3339Nano) // timestamp string + for addr, ots := range found { // correct timestamp inconsistency + if latestObject.GetTimestamp() > ots.GetTimestamp() { + log.Infof("timestamp inconsistency detected with vector(id: %s, timestamp: %s). updating with the latest vector(id: %s, timestamp: %s)", + ots.GetId(), + time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), + latestObject.GetId(), + tss, + ) + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateRPCName), addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + client := vc.NewValdClient(conn) + _, err := client.UpdateTimestamp(ctx, &payload.Update_TimestampRequest{ + Id: latestObject.GetId(), + Timestamp: latestObject.GetTimestamp(), + }, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.Canceled || + st.Code() == codes.AlreadyExists || + st.Code() == codes.InvalidArgument || + st.Code() == codes.NotFound { + return nil, nil + } + return nil, err + } + log.Infof("vector successfully updated. 
address: %s, uuid: %s, timestamp: %s", addr, latestObject.GetId(), tss) + c.correctedOldIndexCount.Add(1) + return nil, nil + }) + if err != nil { + log.Errorf("failed to fix timestamp to %s for id %s agent %s error: %w", tss, id, addr, err) + } + } + } +} + +func (c *correct) correctOversupply( + ctx context.Context, + id, selfAddr, debugMsg string, + found map[string]*payload.Object_Timestamp, + diff int, +) (err error) { + addrs := c.discoverer.GetAddrs(ctx) + log.Infof("replica oversupply(configured: %d, stored: %d, diff: %d) of vector id: %s detected for %s. deleting from agents = %v", c.indexReplica, len(found)+1, diff, id, debugMsg, found) + if len(addrs) == 0 { + return errors.ErrNoAvailableAgentToRemove + } + req := &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: id, + }, + } + for _, daddr := range addrs { + if diff < 0 { + _, ok := found[daddr] + if ok || daddr == selfAddr { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.RemoveRPCServiceName+"/"+vald.RemoveRPCName), daddr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + _, err := vc.NewValdClient(conn).Remove(ctx, req, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + diff++ + c.correctedReplicationCount.Add(1) + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + diff++ + c.correctedReplicationCount.Add(1) + return nil, nil + }) + if err != nil { + log.Errorf("failed to delete object from agent(%s): %w", daddr, err) + } + } + } + } + return nil +} + +func (c *correct) correctShortage( + ctx context.Context, + id, selfAddr, debugMsg string, + latestObject *payload.Object_Vector, + found map[string]*payload.Object_Timestamp, + diff int, +) (err error) { + addrs := c.discoverer.GetAddrs(ctx) + log.Infof("replica shortage(configured: %d, stored: %d, diff: %d) of vector id: %s detected for %s. inserting to other agents = %v", c.indexReplica, len(found)+1, diff, id, debugMsg, addrs) + if len(addrs) == 0 { + return errors.ErrNoAvailableAgentToInsert + } + req := &payload.Insert_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Insert_Config{ + Timestamp: latestObject.GetTimestamp(), + }, + } + for _, daddr := range addrs { + if diff > 0 && daddr != selfAddr { + _, ok := found[daddr] + if !ok { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + client := vc.NewValdClient(conn) + _, err := client.Insert(ctx, req, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.AlreadyExists { + var obj *payload.Object_Vector + obj, err = client.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }, copts...) 
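+							// The object fetched above is compared with latestObject below: a stale replica is
+							// refreshed via Update, and the agent is then counted toward the corrected replica total.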
+ if err != nil { + if st, ok = status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + if obj != nil { + if obj.GetTimestamp() < latestObject.GetTimestamp() { + _, err = client.Update(ctx, &payload.Update_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Update_Config{ + // TODO: Decrementing because it's gonna be incremented before being pushed + // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation + // so we should consider refactoring vqueue. + Timestamp: latestObject.GetTimestamp() - 1, + }, + }, copts...) + if err != nil { + if st, ok = status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + } + diff-- + c.correctedReplicationCount.Add(1) + } + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + diff-- + c.correctedReplicationCount.Add(1) + return nil, nil + }) + if err != nil { + log.Errorf("failed to insert object to agent(%s): %w", daddr, err) + } + } + } + } + return nil +} diff --git a/pkg/index/job/correction/service/corrector_test.go b/pkg/index/job/correction/service/corrector_test.go index f3719260c7..3e905316fd 100644 --- a/pkg/index/job/correction/service/corrector_test.go +++ b/pkg/index/job/correction/service/corrector_test.go @@ -905,3 +905,784 @@ package service // }) // } // } +// +// func Test_correct_loadReplicaInfo(t *testing.T) { +// type args struct { +// ctx context.Context +// originAddr string +// id string +// replicas []string +// counts map[string]*payload.Info_Index_Count +// ts int64 +// start time.Time +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// wantFound map[string]*payload.Object_Timestamp +// wantSkipped []string +// wantLatest int64 +// wantLatestAgent string +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, map[string]*payload.Object_Timestamp, []string, int64, string, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotFound map[string]*payload.Object_Timestamp, gotSkipped []string, gotLatest int64, gotLatestAgent string, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotFound, w.wantFound) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotFound, w.wantFound) +// } +// if !reflect.DeepEqual(gotSkipped, w.wantSkipped) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotSkipped, w.wantSkipped) +// } +// if !reflect.DeepEqual(gotLatest, w.wantLatest) { +// return 
errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLatest, w.wantLatest) +// } +// if !reflect.DeepEqual(gotLatestAgent, w.wantLatestAgent) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLatestAgent, w.wantLatestAgent) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// originAddr:"", +// id:"", +// replicas:nil, +// counts:nil, +// ts:0, +// start:time.Time{}, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// originAddr:"", +// id:"", +// replicas:nil, +// counts:nil, +// ts:0, +// start:time.Time{}, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// gotFound, gotSkipped, gotLatest, gotLatestAgent, err := c.loadReplicaInfo( +// test.args.ctx, +// test.args.originAddr, +// test.args.id, +// test.args.replicas, +// test.args.counts, +// test.args.ts, +// test.args.start, +// ) +// if err := checkFunc(test.want, gotFound, gotSkipped, gotLatest, gotLatestAgent, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_getLatestObject(t *testing.T) { +// type args struct { +// ctx context.Context +// id string +// addr string +// latestAgent string +// latest int64 +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount 
atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// wantLatestObject *payload.Object_Vector +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Vector) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotLatestObject *payload.Object_Vector) error { +// if !reflect.DeepEqual(gotLatestObject, w.wantLatestObject) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLatestObject, w.wantLatestObject) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// addr:"", +// latestAgent:"", +// latest:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// addr:"", +// latestAgent:"", +// latest:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// gotLatestObject := c.getLatestObject(test.args.ctx, test.args.id, test.args.addr, test.args.latestAgent, test.args.latest) +// if err := checkFunc(test.want, gotLatestObject); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_correctTimestamp(t *testing.T) { +// type args struct { +// ctx 
context.Context +// id string +// latestObject *payload.Object_Vector +// found map[string]*payload.Object_Timestamp +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// latestObject:nil, +// found:nil, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// latestObject:nil, +// found:nil, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// c.correctTimestamp(test.args.ctx, test.args.id, test.args.latestObject, test.args.found) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_correctOversupply(t *testing.T) { +// type args struct { +// ctx context.Context +// id string +// selfAddr 
string +// debugMsg string +// found map[string]*payload.Object_Timestamp +// diff int +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// err := c.correctOversupply(test.args.ctx, test.args.id, test.args.selfAddr, test.args.debugMsg, test.args.found, test.args.diff) +// if err := 
checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_correctShortage(t *testing.T) { +// type args struct { +// ctx context.Context +// id string +// selfAddr string +// debugMsg string +// latestObject *payload.Object_Vector +// found map[string]*payload.Object_Timestamp +// diff int +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// latestObject:nil, +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// latestObject:nil, +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: 
test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// err := c.correctShortage(test.args.ctx, test.args.id, test.args.selfAddr, test.args.debugMsg, test.args.latestObject, test.args.found, test.args.diff) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/index/job/correction/usecase/corrector.go b/pkg/index/job/correction/usecase/corrector.go index 976d3ad961..5bf3b6b267 100644 --- a/pkg/index/job/correction/usecase/corrector.go +++ b/pkg/index/job/correction/usecase/corrector.go @@ -185,10 +185,10 @@ func (r *run) Start(ctx context.Context) (<-chan error, error) { } })) - // main groutine to run the job + // main goroutine to run the job r.eg.Go(safety.RecoverFunc(func() (err error) { defer func() { - log.Info("fiding my pid to kill myself") + log.Info("finding my pid to kill myself") p, err := os.FindProcess(os.Getpid()) if err != nil { // using Fatal to avoid this process to be zombie diff --git a/pkg/index/job/readreplica/rotate/service/rotator.go b/pkg/index/job/readreplica/rotate/service/rotator.go index 9975e31b1a..ef24e67889 100644 --- a/pkg/index/job/readreplica/rotate/service/rotator.go +++ b/pkg/index/job/readreplica/rotate/service/rotator.go @@ -198,7 +198,7 @@ func (s *subProcess) createSnapshot( oldSnap = cur.DeepCopy() newNameBase := getNewBaseName(cur.GetObjectMeta().GetName()) if newNameBase == "" { - return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replicaid", cur.GetObjectMeta().GetName()) + return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replica id", cur.GetObjectMeta().GetName()) } newSnap = &k8s.VolumeSnapshot{ ObjectMeta: k8s.ObjectMeta{ @@ -244,7 +244,7 @@ func (s *subProcess) createPVC( oldPvc = cur.DeepCopy() newNameBase := getNewBaseName(cur.GetObjectMeta().GetName()) if newNameBase == "" { - return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replicaid", cur.GetObjectMeta().GetName()) + return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replica id", cur.GetObjectMeta().GetName()) } // remove timestamp from old pvc name diff --git a/pkg/index/operator/service/operator.go b/pkg/index/operator/service/operator.go index dc9f476958..cfc086a58f 100644 --- a/pkg/index/operator/service/operator.go +++ b/pkg/index/operator/service/operator.go @@ -309,11 +309,11 @@ func (o *operator) ensureJobConcurrency( } for _, job := range jobList.Items { - annotaions := job.Spec.Template.Annotations - if annotaions == nil { + annotations := job.Spec.Template.Annotations + if annotations == nil { continue } - id, ok := annotaions[o.targetReadReplicaIDAnnotationsKey] + id, ok := annotations[o.targetReadReplicaIDAnnotationsKey] if !ok { continue } diff --git a/pkg/manager/index/service/indexer.go b/pkg/manager/index/service/indexer.go index 42e90b1c9b..88400fe75f 100644 --- a/pkg/manager/index/service/indexer.go +++ b/pkg/manager/index/service/indexer.go @@ -24,10 +24,10 @@ import ( "sync/atomic" "time" - agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" "github.com/vdaas/vald/apis/grpc/v1/payload" - vald "github.com/vdaas/vald/apis/grpc/v1/vald" + agent "github.com/vdaas/vald/internal/client/v1/client/agent/core" "github.com/vdaas/vald/internal/client/v1/client/discoverer" + vald "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" 
"github.com/vdaas/vald/internal/net/grpc" diff --git a/pkg/tools/benchmark/job/config/config.go b/pkg/tools/benchmark/job/config/config.go index a81c68d1c9..03c3adc485 100644 --- a/pkg/tools/benchmark/job/config/config.go +++ b/pkg/tools/benchmark/job/config/config.go @@ -112,7 +112,7 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { if jobResource.Spec.ServerConfig != nil { overrideCfg.Server = (*jobResource.Spec.ServerConfig).Bind() } - // jobResource.Spec has another field comparering Config.Job, so json.Marshal and Unmarshal are used for embedding field value of Config.Job from jobResource.Spec + // jobResource.Spec has another field comparing Config.Job, so json.Marshal and Unmarshal are used for embedding field value of Config.Job from jobResource.Spec var overrideJobCfg config.BenchmarkJob b, err := json.Marshal(*jobResource.Spec.DeepCopy()) if err == nil { diff --git a/pkg/tools/benchmark/operator/service/operator.go b/pkg/tools/benchmark/operator/service/operator.go index 187673d527..1d80e6838d 100644 --- a/pkg/tools/benchmark/operator/service/operator.go +++ b/pkg/tools/benchmark/operator/service/operator.go @@ -193,8 +193,8 @@ func (o *operator) jobReconcile(ctx context.Context, jobList map[string][]k8s.Jo } // benchmarkJobStatus is used for update benchmark job resource status benchmarkJobStatus := make(map[string]v1.BenchmarkJobStatus) - // jobNames is used for check whether cjobs has delted job. - // If cjobs has the delted job, it will be remove the end of jobReconcile function. + // jobNames is used for check whether cjobs has deleted job. + // If cjobs has the deleted job, it will be remove the end of jobReconcile function. jobNames := map[string]struct{}{} for _, jobs := range jobList { cnt := len(jobs) diff --git a/pkg/tools/benchmark/operator/service/operator_test.go b/pkg/tools/benchmark/operator/service/operator_test.go index 1e2e0abba5..70129badaa 100644 --- a/pkg/tools/benchmark/operator/service/operator_test.go +++ b/pkg/tools/benchmark/operator/service/operator_test.go @@ -2911,7 +2911,7 @@ func Test_operator_checkAtomics(t *testing.T) { tests := []test{ func() test { return test{ - name: "return nil with no mismatch atmoics", + name: "return nil with no mismatch atomics", fields: fields{ scenarios: func() *atomic.Pointer[map[string]*scenario] { ap := atomic.Pointer[map[string]*scenario]{} diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 3eda141a18..928d667c87 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -4,29 +4,29 @@ version = 3 [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "agent" version = "0.1.0" dependencies = [ "algorithm", - "prost 0.13.1", + "prost 0.13.2", "proto", "tokio", "tokio-stream", - "tonic 0.12.1", + "tonic 0.12.2", ] [[package]] @@ -39,9 +39,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" [[package]] name = "async-stream" @@ -67,9 +67,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", @@ -182,17 +182,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -242,9 +242,9 @@ checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "cc" -version = "1.1.13" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72db2f7947ecee9b03b510377e8bb9077afa27176fdbff55c51027e976fdcc48" +checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476" dependencies = [ "shlex", ] @@ -267,9 +267,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.126" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c4eae4b7fc8dcb0032eb3b1beee46b38d371cdeaf2d0c64b9944f6f69ad7755" +checksum = "54ccead7d199d584d139148b04b4a368d1ec7556a1d9ea2548febb1b9d49f9a4" dependencies = [ "cc", "cxxbridge-flags", @@ -279,9 +279,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.126" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c822bf7fb755d97328d6c337120b6f843678178751cba33c9da25cf522272e0" +checksum = "c77953e99f01508f89f55c494bfa867171ef3a6c8cea03d26975368f2121a5c1" dependencies = [ "cc", "codespan-reporting", @@ -294,15 +294,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.126" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719d6197dc016c88744aff3c0d0340a01ecce12e8939fc282e7c8f583ee64bc6" +checksum = "65777e06cc48f0cb0152024c77d6cf9e4bdb4408e7b48bea993d42fa0f5b02b6" [[package]] name = "cxxbridge-macro" -version = "1.0.126" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35de3b547387863c8f82013c4f79f1c2162edee956383e4089e1d04c18c4f16c" +checksum = "98532a60dedaebc4848cb2cba5023337cc9ea3af16a5b062633fabfd9f18fb60" dependencies = [ "proc-macro2", "quote", @@ -393,9 +393,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "h2" @@ -409,7 +409,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -428,7 +428,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.4.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -593,9 +593,9 @@ 
dependencies = [ [[package]] name = "hyper-util" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -633,15 +633,24 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ead53efc7ea8ed3cfb0c79fc8023fbb782a5432b52830b6518941cebe6505c" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", ] [[package]] +name = "ipnet" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" + +[[package]] name = "is_ci" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -753,11 +762,11 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] @@ -785,9 +794,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.3" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -992,12 +1001,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +checksum = "3b2ecbe40f08db5c006b5764a2645f7f3f141ce756412ac9e1dd6087e6d32995" dependencies = [ "bytes", - "prost-derive 0.13.1", + "prost-derive 0.13.2", ] [[package]] @@ -1015,9 +1024,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +checksum = "acf0c195eebb4af52c752bec4f52f645da98b6e92077a04110c7f349477ae5ac" dependencies = [ "anyhow", "itertools 0.13.0", @@ -1028,11 +1037,11 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cee5168b05f49d4b0ca581206eb14a7b22fafd963efe729ac48eb03266e25cc2" +checksum = "60caa6738c7369b940c3d49246a8d1749323674c65cb13010134f5c9bad5b519" dependencies = [ - "prost 0.13.1", + "prost 0.13.2", ] [[package]] @@ -1040,16 +1049,16 @@ name = "proto" version = "0.1.0" dependencies = [ "futures-core", - "prost 0.13.1", - "tonic 0.12.1", + "prost 0.13.2", + "tonic 0.12.2", "tonic-types", ] [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = 
"b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -1137,9 +1146,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36" dependencies = [ "bitflags 2.6.0", "errno", @@ -1168,18 +1177,18 @@ checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" [[package]] name = "serde" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", @@ -1187,6 +1196,33 @@ dependencies = [ ] [[package]] +<<<<<<< HEAD +======= +name = "serde_json" +version = "1.0.128" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +>>>>>>> 3ba0002d4 (Add UpdateTimestamp API (#2605)) name = "shlex" version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -1234,9 +1270,9 @@ dependencies = [ [[package]] name = "supports-color" -version = "3.0.0" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9829b314621dfc575df4e409e79f9d6a66a3bd707ab73f23cb4aa3a854ac854f" +checksum = "8775305acf21c96926c900ad056abeef436701108518cf890020387236ac5a77" dependencies = [ "is_ci", ] @@ -1255,9 +1291,9 @@ checksum = "b7401a30af6cb5818bb64852270bb722533397edcfc7344954a38f420819ece2" [[package]] name = "syn" -version = "2.0.75" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6af063034fc1935ede7be0122941bafa9bacb949334d090b77ca98b5817c7d9" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2", "quote", @@ -1349,9 +1385,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.39.3" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9babc99b9923bfa4804bd74722ff02c0381021eafa4db9949217e3be8e84fff5" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -1388,9 +1424,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = 
"4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -1400,9 +1436,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -1440,9 +1476,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +checksum = "c6f6ba989e4b2c58ae83d862d3a3e27690b6e3ae630d0deb59f3697f32aa88ad" dependencies = [ "async-stream", "async-trait", @@ -1458,7 +1494,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost 0.13.1", + "prost 0.13.2", "socket2", "tokio", "tokio-stream", @@ -1470,13 +1506,13 @@ dependencies = [ [[package]] name = "tonic-types" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5563899ec5aa5f0ec48e37457461ffbbc184c9a0f413f715dacd154f46408a10" +checksum = "9d967793411bc1a5392accf4731114295f0fd122865d22cde46a8584b03402b2" dependencies = [ - "prost 0.13.1", + "prost 0.13.2", "prost-types", - "tonic 0.12.1", + "tonic 0.12.2", ] [[package]] diff --git a/rust/bin/agent/src/handler/index.rs b/rust/bin/agent/src/handler/index.rs index fc33ceb22b..fa6b39d6a6 100644 --- a/rust/bin/agent/src/handler/index.rs +++ b/rust/bin/agent/src/handler/index.rs @@ -15,7 +15,7 @@ // use proto::{ core::v1::agent_server, - payload::v1::{control, info, object, Empty}, + payload::v1::{control, info, Empty}, vald::v1::index_server, }; diff --git a/rust/bin/agent/src/handler/update.rs b/rust/bin/agent/src/handler/update.rs index c738a3dfd5..90ac9d5cdb 100644 --- a/rust/bin/agent/src/handler/update.rs +++ b/rust/bin/agent/src/handler/update.rs @@ -45,4 +45,12 @@ impl update_server::Update for super::Agent { ) -> std::result::Result, tonic::Status> { todo!() } + + #[doc = " A method to update timestamp indexed vectors in a single request.\n"] + async fn update_timestamp( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + todo!() + } } diff --git a/rust/libs/proto/src/core.v1.tonic.rs b/rust/libs/proto/src/core.v1.tonic.rs index b021552c00..f19cd43034 100644 --- a/rust/libs/proto/src/core.v1.tonic.rs +++ b/rust/libs/proto/src/core.v1.tonic.rs @@ -217,19 +217,17 @@ pub mod agent_server { } #[derive(Debug)] pub struct AgentServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl AgentServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -292,7 +290,6 @@ pub mod agent_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/core.v1.Agent/CreateIndex" => { #[allow(non_camel_case_types)] @@ -326,7 +323,6 @@ pub mod agent_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut 
= async move { - let inner = inner.0; let method = CreateIndexSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -375,7 +371,6 @@ pub mod agent_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SaveIndexSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -424,7 +419,6 @@ pub mod agent_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = CreateAndSaveIndexSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -446,8 +440,11 @@ pub mod agent_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -468,16 +465,6 @@ pub mod agent_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for AgentServer { const NAME: &'static str = "core.v1.Agent"; } diff --git a/rust/libs/proto/src/discoverer.v1.tonic.rs b/rust/libs/proto/src/discoverer.v1.tonic.rs index 41bdf757ee..e1ca78c88a 100644 --- a/rust/libs/proto/src/discoverer.v1.tonic.rs +++ b/rust/libs/proto/src/discoverer.v1.tonic.rs @@ -225,19 +225,17 @@ pub mod discoverer_server { } #[derive(Debug)] pub struct DiscovererServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl DiscovererServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -300,7 +298,6 @@ pub mod discoverer_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/discoverer.v1.Discoverer/Pods" => { #[allow(non_camel_case_types)] @@ -334,7 +331,6 @@ pub mod discoverer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = PodsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -383,7 +379,6 @@ pub mod discoverer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = NodesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -432,7 +427,6 @@ pub mod discoverer_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = ServicesSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -454,8 +448,11 @@ pub 
mod discoverer_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -476,16 +473,6 @@ pub mod discoverer_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for DiscovererServer { const NAME: &'static str = "discoverer.v1.Discoverer"; } diff --git a/rust/libs/proto/src/filter.egress.v1.tonic.rs b/rust/libs/proto/src/filter.egress.v1.tonic.rs index 3ff1dd2a6e..9627dbcf2c 100644 --- a/rust/libs/proto/src/filter.egress.v1.tonic.rs +++ b/rust/libs/proto/src/filter.egress.v1.tonic.rs @@ -17,6 +17,8 @@ pub mod filter_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; + /** Represent the egress filter service. +*/ #[derive(Debug, Clone)] pub struct FilterClient { inner: tonic::client::Grpc, @@ -187,21 +189,21 @@ pub mod filter_server { tonic::Status, >; } + /** Represent the egress filter service. +*/ #[derive(Debug)] pub struct FilterServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl FilterServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -264,7 +266,6 @@ pub mod filter_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/filter.egress.v1.Filter/FilterDistance" => { #[allow(non_camel_case_types)] @@ -298,7 +299,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = FilterDistanceSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -347,7 +347,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = FilterVectorSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -369,8 +368,11 @@ pub mod filter_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -391,16 +393,6 @@ pub mod filter_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for FilterServer { const NAME: &'static str = "filter.egress.v1.Filter"; } diff --git 
a/rust/libs/proto/src/filter.ingress.v1.tonic.rs b/rust/libs/proto/src/filter.ingress.v1.tonic.rs index 2637938802..76f377386b 100644 --- a/rust/libs/proto/src/filter.ingress.v1.tonic.rs +++ b/rust/libs/proto/src/filter.ingress.v1.tonic.rs @@ -17,6 +17,8 @@ pub mod filter_client { #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] use tonic::codegen::*; use tonic::codegen::http::Uri; + /** Represent the ingress filter service. +*/ #[derive(Debug, Clone)] pub struct FilterClient { inner: tonic::client::Grpc, @@ -187,21 +189,21 @@ pub mod filter_server { tonic::Status, >; } + /** Represent the ingress filter service. +*/ #[derive(Debug)] pub struct FilterServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl FilterServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -264,7 +266,6 @@ pub mod filter_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/filter.ingress.v1.Filter/GenVector" => { #[allow(non_camel_case_types)] @@ -298,7 +299,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GenVectorSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -347,7 +347,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = FilterVectorSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -369,8 +368,11 @@ pub mod filter_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -391,16 +393,6 @@ pub mod filter_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for FilterServer { const NAME: &'static str = "filter.ingress.v1.Filter"; } diff --git a/rust/libs/proto/src/mirror.v1.tonic.rs b/rust/libs/proto/src/mirror.v1.tonic.rs index 443b46e2ee..fb55727a62 100644 --- a/rust/libs/proto/src/mirror.v1.tonic.rs +++ b/rust/libs/proto/src/mirror.v1.tonic.rs @@ -142,19 +142,17 @@ pub mod mirror_server { } #[derive(Debug)] pub struct MirrorServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl MirrorServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -217,7 
+215,6 @@ pub mod mirror_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/mirror.v1.Mirror/Register" => { #[allow(non_camel_case_types)] @@ -251,7 +248,6 @@ pub mod mirror_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = RegisterSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -273,8 +269,11 @@ pub mod mirror_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -295,16 +294,6 @@ pub mod mirror_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for MirrorServer { const NAME: &'static str = "mirror.v1.Mirror"; } diff --git a/rust/libs/proto/src/payload.v1.rs b/rust/libs/proto/src/payload.v1.rs index 17a6a036ce..5070da89ce 100644 --- a/rust/libs/proto/src/payload.v1.rs +++ b/rust/libs/proto/src/payload.v1.rs @@ -14,7 +14,7 @@ // limitations under the License. // #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Search { } /// Nested message and enum types in `Search`. @@ -192,7 +192,7 @@ pub mod search { } /// Filter related messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Filter { } /// Nested message and enum types in `Filter`. @@ -219,7 +219,7 @@ pub mod filter { } /// Insert related messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Insert { } /// Nested message and enum types in `Insert`. @@ -282,7 +282,7 @@ pub mod insert { } /// Update related messages #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Update { } /// Nested message and enum types in `Update`. @@ -328,6 +328,20 @@ pub mod update { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } + /// Represent a vector meta data. + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct TimestampRequest { + /// The vector ID. + #[prost(string, tag="1")] + pub id: ::prost::alloc::string::String, + /// timestamp represents when this vector inserted. + #[prost(int64, tag="2")] + pub timestamp: i64, + /// force represents forcefully update the timestamp. + #[prost(bool, tag="3")] + pub force: bool, + } /// Represent the update configuration. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -349,7 +363,7 @@ pub mod update { } /// Upsert related messages. 
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Upsert { } /// Nested message and enum types in `Upsert`. @@ -416,7 +430,7 @@ pub mod upsert { } /// Remove related messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Remove { } /// Nested message and enum types in `Remove`. @@ -451,7 +465,7 @@ pub mod remove { } /// Represent the timestamp comparison. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Timestamp { /// The timestamp. #[prost(int64, tag="1")] @@ -512,7 +526,7 @@ pub mod remove { } /// Represent the remove configuration. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Config { /// A flag to skip exist check during upsert operation. #[prost(bool, tag="1")] @@ -524,19 +538,19 @@ pub mod remove { } /// Flush related messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Flush { } /// Nested message and enum types in `Flush`. pub mod flush { #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Request { } } /// Common messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Object { } /// Nested message and enum types in `Object`. @@ -742,13 +756,13 @@ pub mod object { } /// Represent the list object vector stream request and response. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct List { } /// Nested message and enum types in `List`. pub mod list { #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Request { } #[allow(clippy::derive_partial_eq_without_eq)] @@ -774,14 +788,14 @@ pub mod object { } /// Control related messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Control { } /// Nested message and enum types in `Control`. pub mod control { /// Represent the create index request. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct CreateIndexRequest { /// The pool size of the create index operation. #[prost(uint32, tag="1")] @@ -790,7 +804,7 @@ pub mod control { } /// Discoverer related messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Discoverer { } /// Nested message and enum types in `Discoverer`. @@ -812,21 +826,21 @@ pub mod discoverer { } /// Info related messages. 
#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Info { } /// Nested message and enum types in `Info`. pub mod info { /// Represent the index information messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Index { } /// Nested message and enum types in `Index`. pub mod index { /// Represent the index count message. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Count { /// The stored index count. #[prost(uint32, tag="1")] @@ -857,7 +871,7 @@ pub mod info { } /// Represent the UUID message. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Uuid { } /// Nested message and enum types in `UUID`. @@ -1136,7 +1150,7 @@ pub mod info { } /// Represent the CPU information message. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Cpu { /// The CPU resource limit. #[prost(double, tag="1")] @@ -1150,7 +1164,7 @@ pub mod info { } /// Represent the memory information message. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Memory { /// The memory limit. #[prost(double, tag="1")] @@ -1196,7 +1210,7 @@ pub mod info { } /// Mirror related messages. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Mirror { } /// Nested message and enum types in `Mirror`. @@ -1223,7 +1237,7 @@ pub mod mirror { } /// Represent an empty message. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Empty { } // @@protoc_insertion_point(module) diff --git a/rust/libs/proto/src/rpc.v1.rs b/rust/libs/proto/src/rpc.v1.rs index 11e53490a2..3b9abf8f8b 100644 --- a/rust/libs/proto/src/rpc.v1.rs +++ b/rust/libs/proto/src/rpc.v1.rs @@ -56,7 +56,7 @@ pub struct ErrorInfo { /// number of retries have been reached or a maximum retry delay cap has been /// reached. #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct RetryInfo { /// Clients should wait at least this long between retrying the same request. 
#[prost(message, optional, tag="1")] diff --git a/rust/libs/proto/src/sidecar.v1.tonic.rs b/rust/libs/proto/src/sidecar.v1.tonic.rs index f1f8c1065a..ad7acdee15 100644 --- a/rust/libs/proto/src/sidecar.v1.tonic.rs +++ b/rust/libs/proto/src/sidecar.v1.tonic.rs @@ -108,19 +108,17 @@ pub mod sidecar_server { pub trait Sidecar: Send + Sync + 'static {} #[derive(Debug)] pub struct SidecarServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl SidecarServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -183,15 +181,17 @@ pub mod sidecar_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -212,16 +212,6 @@ pub mod sidecar_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for SidecarServer { const NAME: &'static str = "sidecar.v1.Sidecar"; } diff --git a/rust/libs/proto/src/vald.v1.tonic.rs b/rust/libs/proto/src/vald.v1.tonic.rs index 9c856bfd0d..29843a4d93 100644 --- a/rust/libs/proto/src/vald.v1.tonic.rs +++ b/rust/libs/proto/src/vald.v1.tonic.rs @@ -124,6 +124,8 @@ pub mod filter_client { .insert(GrpcMethod::new("vald.v1.Filter", "SearchObject")); self.inner.unary(req, path, codec).await } + /** A method to search multiple objects. +*/ pub async fn multi_search_object( &mut self, request: impl tonic::IntoRequest< @@ -475,6 +477,8 @@ pub mod filter_server { tonic::Response, tonic::Status, >; + /** A method to search multiple objects. 
+*/ async fn multi_search_object( &self, request: tonic::Request< @@ -633,19 +637,17 @@ pub mod filter_server { } #[derive(Debug)] pub struct FilterServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl FilterServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -708,7 +710,6 @@ pub mod filter_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Filter/SearchObject" => { #[allow(non_camel_case_types)] @@ -742,7 +743,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SearchObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -791,7 +791,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiSearchObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -843,7 +842,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamSearchObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -892,7 +890,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = InsertObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -944,7 +941,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamInsertObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -993,7 +989,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiInsertObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1042,7 +1037,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = UpdateObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1094,7 +1088,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamUpdateObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1143,7 +1136,6 @@ pub mod filter_server { let max_encoding_message_size = 
self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiUpdateObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1192,7 +1184,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = UpsertObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1244,7 +1235,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamUpsertObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1293,7 +1283,6 @@ pub mod filter_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiUpsertObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1315,8 +1304,11 @@ pub mod filter_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -1337,16 +1329,6 @@ pub mod filter_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for FilterServer { const NAME: &'static str = "vald.v1.Filter"; } @@ -1479,19 +1461,17 @@ pub mod flush_server { } #[derive(Debug)] pub struct FlushServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl FlushServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -1554,7 +1534,6 @@ pub mod flush_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Flush/Flush" => { #[allow(non_camel_case_types)] @@ -1588,7 +1567,6 @@ pub mod flush_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = FlushSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -1610,8 +1588,11 @@ pub mod flush_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -1632,16 +1613,6 @@ pub mod flush_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl 
std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for FlushServer { const NAME: &'static str = "vald.v1.Flush"; } @@ -1919,19 +1890,17 @@ pub mod index_server { } #[derive(Debug)] pub struct IndexServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl IndexServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -1994,7 +1963,6 @@ pub mod index_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Index/IndexInfo" => { #[allow(non_camel_case_types)] @@ -2028,7 +1996,6 @@ pub mod index_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = IndexInfoSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2077,7 +2044,6 @@ pub mod index_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = IndexDetailSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2126,7 +2092,6 @@ pub mod index_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = IndexStatisticsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2175,7 +2140,6 @@ pub mod index_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = IndexStatisticsDetailSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2224,7 +2188,6 @@ pub mod index_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = IndexPropertySvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2246,8 +2209,11 @@ pub mod index_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -2268,16 +2234,6 @@ pub mod index_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for IndexServer { const NAME: &'static str = "vald.v1.Index"; } @@ -2503,19 +2459,17 @@ pub mod insert_server { } #[derive(Debug)] pub struct InsertServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, 
send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl InsertServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -2578,7 +2532,6 @@ pub mod insert_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Insert/Insert" => { #[allow(non_camel_case_types)] @@ -2612,7 +2565,6 @@ pub mod insert_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = InsertSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2664,7 +2616,6 @@ pub mod insert_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamInsertSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2713,7 +2664,6 @@ pub mod insert_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiInsertSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -2735,8 +2685,11 @@ pub mod insert_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -2757,16 +2710,6 @@ pub mod insert_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for InsertServer { const NAME: &'static str = "vald.v1.Insert"; } @@ -3078,19 +3021,17 @@ pub mod object_server { } #[derive(Debug)] pub struct ObjectServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl ObjectServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -3153,7 +3094,6 @@ pub mod object_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Object/Exists" => { #[allow(non_camel_case_types)] @@ -3187,7 +3127,6 @@ pub mod object_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = ExistsSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3236,7 +3175,6 @@ pub mod object_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let 
fut = async move { - let inner = inner.0; let method = GetObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3288,7 +3226,6 @@ pub mod object_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamGetObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3338,7 +3275,6 @@ pub mod object_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamListObjectSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3387,7 +3323,6 @@ pub mod object_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = GetTimestampSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3409,8 +3344,11 @@ pub mod object_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -3431,16 +3369,6 @@ pub mod object_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for ObjectServer { const NAME: &'static str = "vald.v1.Object"; } @@ -3706,19 +3634,17 @@ pub mod remove_server { } #[derive(Debug)] pub struct RemoveServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl RemoveServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -3781,7 +3707,6 @@ pub mod remove_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Remove/Remove" => { #[allow(non_camel_case_types)] @@ -3815,7 +3740,6 @@ pub mod remove_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = RemoveSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3864,7 +3788,6 @@ pub mod remove_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = RemoveByTimestampSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3916,7 +3839,6 @@ pub mod remove_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamRemoveSvc(inner); let codec = 
tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3965,7 +3887,6 @@ pub mod remove_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiRemoveSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -3987,8 +3908,11 @@ pub mod remove_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -4009,16 +3933,6 @@ pub mod remove_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for RemoveServer { const NAME: &'static str = "vald.v1.Remove"; } @@ -4132,6 +4046,8 @@ pub mod search_client { req.extensions_mut().insert(GrpcMethod::new("vald.v1.Search", "Search")); self.inner.unary(req, path, codec).await } + /** A method to search indexed vectors by ID. +*/ pub async fn search_by_id( &mut self, request: impl tonic::IntoRequest< @@ -4482,6 +4398,8 @@ pub mod search_server { tonic::Response, tonic::Status, >; + /** A method to search indexed vectors by ID. +*/ async fn search_by_id( &self, request: tonic::Request, @@ -4636,19 +4554,17 @@ pub mod search_server { } #[derive(Debug)] pub struct SearchServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl SearchServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -4711,7 +4627,6 @@ pub mod search_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Search/Search" => { #[allow(non_camel_case_types)] @@ -4745,7 +4660,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SearchSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -4794,7 +4708,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = SearchByIDSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -4846,7 +4759,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamSearchSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -4898,7 +4810,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let 
method = StreamSearchByIDSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -4947,7 +4858,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiSearchSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -4996,7 +4906,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiSearchByIDSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5045,7 +4954,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = LinearSearchSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5094,7 +5002,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = LinearSearchByIDSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5146,7 +5053,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamLinearSearchSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5199,7 +5105,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamLinearSearchByIDSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5248,7 +5153,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiLinearSearchSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5298,7 +5202,6 @@ pub mod search_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiLinearSearchByIDSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5320,8 +5223,11 @@ pub mod search_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -5342,16 +5248,6 @@ pub mod search_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for SearchServer { const NAME: &'static str = "vald.v1.Search"; } @@ -5527,6 +5423,35 @@ pub mod update_client { .insert(GrpcMethod::new("vald.v1.Update", 
"MultiUpdate")); self.inner.unary(req, path, codec).await } + /** A method to update timestamp an indexed vector. +*/ + pub async fn update_timestamp( + &mut self, + request: impl tonic::IntoRequest< + super::super::super::payload::v1::update::TimestampRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/vald.v1.Update/UpdateTimestamp", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("vald.v1.Update", "UpdateTimestamp")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -5574,22 +5499,31 @@ pub mod update_server { tonic::Response, tonic::Status, >; + /** A method to update timestamp an indexed vector. +*/ + async fn update_timestamp( + &self, + request: tonic::Request< + super::super::super::payload::v1::update::TimestampRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } #[derive(Debug)] pub struct UpdateServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl UpdateServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -5652,7 +5586,6 @@ pub mod update_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Update/Update" => { #[allow(non_camel_case_types)] @@ -5686,7 +5619,6 @@ pub mod update_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = UpdateSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5738,7 +5670,6 @@ pub mod update_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamUpdateSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5787,7 +5718,6 @@ pub mod update_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiUpdateSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -5804,13 +5734,64 @@ pub mod update_server { }; Box::pin(fut) } + "/vald.v1.Update/UpdateTimestamp" => { + #[allow(non_camel_case_types)] + struct UpdateTimestampSvc(pub Arc); + impl< + T: Update, + > tonic::server::UnaryService< + super::super::super::payload::v1::update::TimestampRequest, + > for UpdateTimestampSvc { + type Response = super::super::super::payload::v1::object::Location; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::super::payload::v1::update::TimestampRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async 
move { + ::update_timestamp(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = UpdateTimestampSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -5831,16 +5812,6 @@ pub mod update_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for UpdateServer { const NAME: &'static str = "vald.v1.Update"; } @@ -6066,19 +6037,17 @@ pub mod upsert_server { } #[derive(Debug)] pub struct UpsertServer { - inner: _Inner, + inner: Arc, accept_compression_encodings: EnabledCompressionEncodings, send_compression_encodings: EnabledCompressionEncodings, max_decoding_message_size: Option, max_encoding_message_size: Option, } - struct _Inner(Arc); impl UpsertServer { pub fn new(inner: T) -> Self { Self::from_arc(Arc::new(inner)) } pub fn from_arc(inner: Arc) -> Self { - let inner = _Inner(inner); Self { inner, accept_compression_encodings: Default::default(), @@ -6141,7 +6110,6 @@ pub mod upsert_server { Poll::Ready(Ok(())) } fn call(&mut self, req: http::Request) -> Self::Future { - let inner = self.inner.clone(); match req.uri().path() { "/vald.v1.Upsert/Upsert" => { #[allow(non_camel_case_types)] @@ -6175,7 +6143,6 @@ pub mod upsert_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = UpsertSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -6227,7 +6194,6 @@ pub mod upsert_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = StreamUpsertSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -6276,7 +6242,6 @@ pub mod upsert_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let inner = inner.0; let method = MultiUpsertSvc(inner); let codec = tonic::codec::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) @@ -6298,8 +6263,11 @@ pub mod upsert_server { Ok( http::Response::builder() .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + 
tonic::metadata::GRPC_CONTENT_TYPE, + ) .body(empty_body()) .unwrap(), ) @@ -6320,16 +6288,6 @@ pub mod upsert_server { } } } - impl Clone for _Inner { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } - } - impl std::fmt::Debug for _Inner { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", self.0) - } - } impl tonic::server::NamedService for UpsertServer { const NAME: &'static str = "vald.v1.Upsert"; } diff --git a/rust/rust-toolchain b/rust/rust-toolchain index aaceec04e0..dbd41264aa 100644 --- a/rust/rust-toolchain +++ b/rust/rust-toolchain @@ -1 +1 @@ -1.80.0 +1.81.0 diff --git a/rust/rust-toolchain.toml b/rust/rust-toolchain.toml index f992c12f2c..dc19363102 100644 --- a/rust/rust-toolchain.toml +++ b/rust/rust-toolchain.toml @@ -14,4 +14,4 @@ # limitations under the License. # [toolchain] -channel = "1.80.0" +channel = "1.81.0" diff --git a/tests/chaos/chart/README.md b/tests/chaos/chart/README.md index a142fc4136..14acc560ca 100644 --- a/tests/chaos/chart/README.md +++ b/tests/chaos/chart/README.md @@ -41,4 +41,4 @@ A Helm chart for testing Vald using Chaos Mesh. --- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/tests/e2e/crud/crud_test.go b/tests/e2e/crud/crud_test.go index a5ba1273ba..71969d38f9 100644 --- a/tests/e2e/crud/crud_test.go +++ b/tests/e2e/crud/crud_test.go @@ -415,7 +415,7 @@ func TestE2EStandardCRUD(t *testing.T) { err = op.Flush(t, ctx) if err != nil { - // TODO: Remove code check afeter Flush API is available for agent-faiss and mirror-gateway + // TODO: Remove code check after Flush API is available for agent-faiss and mirror-gateway st, _, _ := status.ParseError(err, codes.Unknown, "") if st.Code() != codes.Unimplemented { t.Fatalf("an error occurred: %s", err) @@ -865,7 +865,7 @@ func TestE2EReadReplica(t *testing.T) { t.Log("waiting for read replica rotator jobs to complete...") if err := kubectl.WaitResources(ctx, t, "job", "app=vald-readreplica-rotate", "complete", "60s"); err != nil { t.Log("wait failed. printing yaml of vald-readreplica-rotate") - kubectl.KubectlCmd(ctx, t, "get", "pod", "-l", "app=vald-readreplica-rotate", "-oyaml") + kubectl.KubectlCmd(ctx, t, "get", "pod", "-l", "app=vald-readreplica-rotate", "-o", "yaml") t.Log("wait failed. printing log of vald-index-operator") kubectl.DebugLog(ctx, t, "app=vald-index-operator") t.Log("wait failed. 
printing log of vald-readreplica-rotate") diff --git a/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go b/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go index 114ac6daa0..8a22351a78 100644 --- a/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go +++ b/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go @@ -147,7 +147,7 @@ func Test_ngt_parallel_delete_and_insert(t *testing.T) { wg.Wait() if n.Len() != maxIDNum { - t.Errorf("inerted id num = %d, want = %d", n.Len(), maxIDNum) + t.Errorf("inserted id num = %d, want = %d", n.Len(), maxIDNum) } for i := int64(0); i < maxIDNum; i++ { @@ -230,7 +230,7 @@ func Test_ngt_parallel_insert_and_delete(t *testing.T) { wg.Wait() if want, got := n.Len(), uint64(0); want != got { - t.Errorf("inerted id num = %d, want = %d", got, want) + t.Errorf("inserted id num = %d, want = %d", got, want) } for i := int64(0); i < maxIDNum; i++ { diff --git a/versions/CHAOS_MESH_VERSION b/versions/CHAOS_MESH_VERSION index ec1cf33c3f..2714f5313a 100644 --- a/versions/CHAOS_MESH_VERSION +++ b/versions/CHAOS_MESH_VERSION @@ -1 +1 @@ -2.6.3 +2.6.4 diff --git a/versions/CMAKE_VERSION b/versions/CMAKE_VERSION index aaa0fde70b..7061f71e70 100644 --- a/versions/CMAKE_VERSION +++ b/versions/CMAKE_VERSION @@ -1 +1 @@ -3.30.2 +3.30.3 diff --git a/versions/DOCKER_VERSION b/versions/DOCKER_VERSION index 96e099462d..12919d2181 100644 --- a/versions/DOCKER_VERSION +++ b/versions/DOCKER_VERSION @@ -1 +1 @@ -v27.1.1 +v27.2.1 diff --git a/versions/GOLANGCILINT_VERSION b/versions/GOLANGCILINT_VERSION index be33d89791..137ade0bc6 100644 --- a/versions/GOLANGCILINT_VERSION +++ b/versions/GOLANGCILINT_VERSION @@ -1 +1 @@ -v1.59.1 +v1.61.0 diff --git a/versions/HELM_VERSION b/versions/HELM_VERSION index de3e42fc29..0af36fd743 100644 --- a/versions/HELM_VERSION +++ b/versions/HELM_VERSION @@ -1 +1 @@ -v3.15.3 +v3.15.4 diff --git a/versions/KIND_VERSION b/versions/KIND_VERSION index ca222b7cf3..2094a100ca 100644 --- a/versions/KIND_VERSION +++ b/versions/KIND_VERSION @@ -1 +1 @@ -0.23.0 +0.24.0 diff --git a/versions/KUBECTL_VERSION b/versions/KUBECTL_VERSION index 062a7525f1..1d37e1e347 100644 --- a/versions/KUBECTL_VERSION +++ b/versions/KUBECTL_VERSION @@ -1 +1 @@ -v1.30.3 \ No newline at end of file +v1.31.0 \ No newline at end of file diff --git a/versions/PROMETHEUS_STACK_VERSION b/versions/PROMETHEUS_STACK_VERSION index 14e3460225..3f9b36de51 100644 --- a/versions/PROMETHEUS_STACK_VERSION +++ b/versions/PROMETHEUS_STACK_VERSION @@ -1 +1 @@ -61.7.1 +62.6.0 diff --git a/versions/PROTOBUF_VERSION b/versions/PROTOBUF_VERSION index 383283e322..ba70ecb3bf 100644 --- a/versions/PROTOBUF_VERSION +++ b/versions/PROTOBUF_VERSION @@ -1 +1 @@ -27.3 +28.0 diff --git a/versions/RUST_VERSION b/versions/RUST_VERSION index aaceec04e0..dbd41264aa 100644 --- a/versions/RUST_VERSION +++ b/versions/RUST_VERSION @@ -1 +1 @@ -1.80.0 +1.81.0 diff --git a/versions/VALDCLI_VERSION b/versions/VALDCLI_VERSION deleted file mode 100644 index b84efa430e..0000000000 --- a/versions/VALDCLI_VERSION +++ /dev/null @@ -1 +0,0 @@ -v1.7.12 diff --git a/versions/actions/ACTIONS_UPLOAD_ARTIFACT b/versions/actions/ACTIONS_UPLOAD_ARTIFACT index 43270543f7..fdc6698807 100644 --- a/versions/actions/ACTIONS_UPLOAD_ARTIFACT +++ b/versions/actions/ACTIONS_UPLOAD_ARTIFACT @@ -1 +1 @@ -4.3.6 +4.4.0 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE b/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE index a36e9b0906..26452813e0 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE +++ 
b/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE @@ -1 +1 @@ -2.18.1 +2.18.3 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD b/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD index a36e9b0906..26452813e0 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD +++ b/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD @@ -1 +1 @@ -2.18.1 +2.18.3 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_INIT b/versions/actions/GITHUB_CODEQL_ACTION_INIT index a36e9b0906..26452813e0 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_INIT +++ b/versions/actions/GITHUB_CODEQL_ACTION_INIT @@ -1 +1 @@ -2.18.1 +2.18.3 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF b/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF index a36e9b0906..26452813e0 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF +++ b/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF @@ -1 +1 @@ -2.18.1 +2.18.3 diff --git a/versions/actions/GITHUB_ISSUE_METRICS b/versions/actions/GITHUB_ISSUE_METRICS index 19811903a7..a5c4c76339 100644 --- a/versions/actions/GITHUB_ISSUE_METRICS +++ b/versions/actions/GITHUB_ISSUE_METRICS @@ -1 +1 @@ -3.8.0 +3.9.0 diff --git a/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST b/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST index dfda3e0b4f..9fe9ff9d99 100644 --- a/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST +++ b/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST @@ -1 +1 @@ -6.1.0 +7.0.1 diff --git a/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET b/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET index 6a126f402d..91c74a5898 100644 --- a/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET +++ b/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET @@ -1 +1 @@ -1.7.5 +1.7.7