From d8ff7d5cc59c7df2b9435e1c291a52db9c0efa4b Mon Sep 17 00:00:00 2001 From: Filip Burlacu Date: Tue, 29 Aug 2023 17:18:55 -0400 Subject: [PATCH] feat: remove afgo-ext mongodb dependency (#1391) - arieskms mongo store implementation based on vcs mongo store - port mongodb store for wallet-cli Signed-off-by: Filip Burlacu --- component/credentialstatus/go.mod | 5 +- component/credentialstatus/go.sum | 3 + component/event/go.mod | 3 + component/event/go.sum | 3 + component/profile/reader/file/go.mod | 3 + component/profile/reader/file/go.sum | 3 + component/wallet-cli/go.mod | 28 +- component/wallet-cli/go.sum | 39 + .../internal/storage/mongodb/store.go | 1682 +++++++ .../internal/storage/mongodb/store_test.go | 2286 ++++++++++ .../internal/storage/mongodb/support_test.go | 3914 +++++++++++++++++ .../pkg/walletrunner/wallet_runner.go | 4 +- go.mod | 4 +- pkg/kms/arieskms.go | 34 +- pkg/kms/arieskms_test.go | 13 + .../mongodb/arieskmsstore/aries_kms_store.go | 95 + .../mongodb/cslindexstore/csl_index_store.go | 4 +- .../mongodb/cslvcstore/csl_vc_store.go | 4 +- pkg/storage/mongodb/internal/util.go | 119 + .../mongodb/vcstatusstore/vc_status_store.go | 4 +- test/bdd/go.mod | 3 + test/bdd/go.sum | 3 + test/stress/go.mod | 3 + test/stress/go.sum | 3 + 24 files changed, 8223 insertions(+), 39 deletions(-) create mode 100644 component/wallet-cli/internal/storage/mongodb/store.go create mode 100644 component/wallet-cli/internal/storage/mongodb/store_test.go create mode 100644 component/wallet-cli/internal/storage/mongodb/support_test.go create mode 100644 pkg/storage/mongodb/arieskmsstore/aries_kms_store.go create mode 100644 pkg/storage/mongodb/internal/util.go diff --git a/component/credentialstatus/go.mod b/component/credentialstatus/go.mod index 3ad53a32c..32a662ba7 100644 --- a/component/credentialstatus/go.mod +++ b/component/credentialstatus/go.mod @@ -9,7 +9,6 @@ go 1.21 require ( github.com/golang/mock v1.6.0 github.com/google/uuid v1.3.0 - 
github.com/hyperledger/aries-framework-go v0.3.3-0.20230828151543-984699876d28 github.com/hyperledger/aries-framework-go-ext/component/vdr/longform v0.0.0-20221201213446-c4c1e76daa49 github.com/hyperledger/aries-framework-go/component/kmscrypto v0.0.0-20230822161205-69119012ed5c github.com/hyperledger/aries-framework-go/component/models v0.0.0-20230828151543-984699876d28 @@ -60,6 +59,8 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fxamacker/cbor/v2 v2.3.0 // indirect github.com/go-jose/go-jose/v3 v3.0.1-0.20221117193127-916db76e8214 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/glog v1.1.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect @@ -71,6 +72,7 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hyperledger/aries-framework-go v0.3.3-0.20230828151543-984699876d28 // indirect github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb v0.0.0-20220728172020-0a8903e45149 // indirect github.com/hyperledger/aries-framework-go-ext/component/vdr/orb v1.0.0-rc5.0.20221201213446-c4c1e76daa49 // indirect github.com/hyperledger/aries-framework-go-ext/component/vdr/sidetree v1.0.0-rc3.0.20221104150937-07bfbe450122 // indirect @@ -148,6 +150,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect go.mongodb.org/mongo-driver v1.11.4 // indirect + go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.9.0 // indirect diff --git a/component/credentialstatus/go.sum b/component/credentialstatus/go.sum index 5a056780b..d9d9d900d 100644 --- 
a/component/credentialstatus/go.sum +++ b/component/credentialstatus/go.sum @@ -337,6 +337,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -1063,6 +1064,8 @@ go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 h1:hATJDiGtTPWglqQRlWUiT5df32bOu9AJV41djhfF4Ig= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0/go.mod h1:nkEFz9FW/KZC65rsd8yrHm4aBKa5STMpe4/Xb5+LG64= go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= diff --git a/component/event/go.mod b/component/event/go.mod index 084b6e07a..ee94d22d3 100644 --- a/component/event/go.mod +++ b/component/event/go.mod @@ -55,6 +55,8 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fxamacker/cbor/v2 v2.3.0 // indirect github.com/go-jose/go-jose/v3 
v3.0.1-0.20221117193127-916db76e8214 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/glog v1.1.1 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -148,6 +150,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect go.mongodb.org/mongo-driver v1.11.4 // indirect + go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect diff --git a/component/event/go.sum b/component/event/go.sum index c66d53dff..aeb18970c 100644 --- a/component/event/go.sum +++ b/component/event/go.sum @@ -337,6 +337,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -1063,6 +1064,8 @@ go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 
h1:hATJDiGtTPWglqQRlWUiT5df32bOu9AJV41djhfF4Ig= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0/go.mod h1:nkEFz9FW/KZC65rsd8yrHm4aBKa5STMpe4/Xb5+LG64= go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= diff --git a/component/profile/reader/file/go.mod b/component/profile/reader/file/go.mod index 2e959abf4..2deafba97 100644 --- a/component/profile/reader/file/go.mod +++ b/component/profile/reader/file/go.mod @@ -58,6 +58,8 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fxamacker/cbor/v2 v2.3.0 // indirect github.com/go-jose/go-jose/v3 v3.0.1-0.20221117193127-916db76e8214 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/glog v1.1.1 // indirect github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect @@ -150,6 +152,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect go.mongodb.org/mongo-driver v1.11.4 // indirect + go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.9.0 // indirect diff --git a/component/profile/reader/file/go.sum b/component/profile/reader/file/go.sum index 8e30b48a4..7af20abe8 100644 --- a/component/profile/reader/file/go.sum +++ b/component/profile/reader/file/go.sum @@ -337,6 +337,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -1068,6 +1069,8 @@ go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 h1:hATJDiGtTPWglqQRlWUiT5df32bOu9AJV41djhfF4Ig= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0/go.mod h1:nkEFz9FW/KZC65rsd8yrHm4aBKa5STMpe4/Xb5+LG64= go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= diff --git a/component/wallet-cli/go.mod b/component/wallet-cli/go.mod index 89a4125e3..ea1342779 100644 --- a/component/wallet-cli/go.mod +++ b/component/wallet-cli/go.mod @@ -7,11 +7,11 @@ module github.com/trustbloc/vcs/component/wallet-cli go 1.21 require ( + github.com/cenkalti/backoff/v4 v4.2.0 github.com/cli/browser v1.1.0 github.com/golang/mock v1.6.0 github.com/google/uuid v1.3.0 github.com/henvic/httpretty v0.1.0 - github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb v0.0.0-20220728172020-0a8903e45149 github.com/hyperledger/aries-framework-go-ext/component/vdr/jwk v0.0.0-20221213152252-f0c83a5a922c 
github.com/hyperledger/aries-framework-go-ext/component/vdr/longform v0.0.0-20221201213446-c4c1e76daa49 github.com/hyperledger/aries-framework-go-ext/component/vdr/orb v1.0.0-rc5.0.20221201213446-c4c1e76daa49 @@ -23,17 +23,23 @@ require ( github.com/hyperledger/aries-framework-go/component/vdr v0.0.0-20230622171716-43af8054a539 github.com/hyperledger/aries-framework-go/spi v0.0.0-20230517133327-301aa0597250 github.com/makiuchi-d/gozxing v0.1.1 + github.com/ory/dockertest/v3 v3.9.1 github.com/piprate/json-gold v0.5.1-0.20230111113000-6ddbe6e6f19f + github.com/pkg/errors v0.9.1 github.com/samber/lo v1.38.1 github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.4 github.com/trustbloc/vcs v0.1.9-0.20230210204445-f2870a36f0ea github.com/valyala/fastjson v1.6.3 + go.mongodb.org/mongo-driver v1.11.4 golang.org/x/oauth2 v0.7.0 ) require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/IBM/mathlib v0.0.3-0.20230605104224-932ab92f2ce0 // indirect + github.com/Microsoft/go-winio v0.6.0 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/PaesslerAG/gval v1.2.0 // indirect github.com/PaesslerAG/jsonpath v0.1.1 // indirect github.com/VictoriaMetrics/fastcache v1.5.7 // indirect @@ -55,15 +61,19 @@ require ( github.com/bluele/gcache v0.0.2 // indirect github.com/btcsuite/btcd v0.22.3 // indirect github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce // indirect - github.com/cenkalti/backoff/v4 v4.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/consensys/bavard v0.1.13 // indirect github.com/consensys/gnark-crypto v0.9.1 // indirect + github.com/containerd/continuity v0.3.0 // indirect github.com/creasty/defaults v1.7.0 // indirect github.com/dave/jennifer v1.6.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deepmap/oapi-codegen v1.11.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/docker/cli 
v20.10.21+incompatible // indirect + github.com/docker/docker v20.10.24+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/ecordell/optgen v0.0.9 // indirect github.com/evanphx/json-patch v4.11.0+incompatible // indirect @@ -73,12 +83,16 @@ require ( github.com/getkin/kin-openapi v0.94.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-jose/go-jose/v3 v3.0.1-0.20221117193127-916db76e8214 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/swag v0.22.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.1 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/certificate-transparency-go v1.1.2-0.20210512142713-bed466244fa6 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/tink/go v1.7.0 // indirect github.com/google/trillian v1.3.14-0.20210520152752-ceda464a95a3 // indirect github.com/gorilla/mux v1.8.0 // indirect @@ -87,10 +101,12 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hyperledger/aries-framework-go v0.3.3-0.20230828151543-984699876d28 // indirect + github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb v0.0.0-20220728172020-0a8903e45149 // indirect github.com/hyperledger/aries-framework-go-ext/component/vdr/sidetree v1.0.0-rc3.0.20221104150937-07bfbe450122 // indirect github.com/hyperledger/aries-framework-go/component/log v0.0.0-20230427134832-0c9969493bd3 // indirect github.com/hyperledger/fabric-amcl v0.0.0-20230602173724-9e02669dceb2 // indirect github.com/hyperledger/ursa-wrapper-go v0.3.1 // indirect + 
github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ipfs/go-cid v0.0.7 // indirect github.com/jinzhu/copier v0.3.5 // indirect @@ -111,6 +127,7 @@ require ( github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/montanaflynn/stats v0.6.6 // indirect github.com/mr-tron/base58 v1.2.0 // indirect @@ -119,6 +136,9 @@ require ( github.com/multiformats/go-multibase v0.1.1 // indirect github.com/multiformats/go-multihash v0.0.14 // indirect github.com/multiformats/go-varint v0.0.6 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2 // indirect + github.com/opencontainers/runc v1.1.5 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/ory/fosite v0.44.0 // indirect github.com/ory/go-acc v0.2.9-0.20230103102148-6b1c9a70dbbe // indirect @@ -126,7 +146,6 @@ require ( github.com/ory/x v0.0.573 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/pelletier/go-toml/v2 v2.0.9 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pquerna/cachecontrol v0.1.0 // indirect github.com/prometheus/client_golang v1.13.0 // indirect @@ -134,6 +153,7 @@ require ( github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/afero v1.9.5 // indirect github.com/spf13/cast v1.5.1 // indirect @@ -164,7 +184,7 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect 
github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect - go.mongodb.org/mongo-driver v1.11.4 // indirect + go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.9.0 // indirect diff --git a/component/wallet-cli/go.sum b/component/wallet-cli/go.sum index 3da3fcd9b..1204d1196 100644 --- a/component/wallet-cli/go.sum +++ b/component/wallet-cli/go.sum @@ -209,9 +209,11 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/cli/browser v1.1.0 h1:xOZBfkfY9L9vMBgqb1YwRirGu6QFaQ5dP/vXt5ENSOY= github.com/cli/browser v1.1.0/go.mod h1:HKMQAt9t12kov91Mn7RfZxyJQQgWgyS/3SZswlZ5iTI= @@ -228,6 +230,7 @@ github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/Yj github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= github.com/consensys/gnark-crypto v0.9.1 
h1:mru55qKdWl3E035hAoh1jj9d7hVnYY5pfb6tmovSmII= github.com/consensys/gnark-crypto v0.9.1/go.mod h1:a2DQL4+5ywF6safEeZFEPGRiiGbjzGFRUN2sg06VuU4= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -239,6 +242,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= @@ -249,12 +253,14 @@ github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuv github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creasty/defaults v1.7.0 h1:eNdqZvc5B509z18lD8yc212CAqJNvfT1Jq6L8WowdBA= 
github.com/creasty/defaults v1.7.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= github.com/cristalhq/jwt/v4 v4.0.2 h1:g/AD3h0VicDamtlM70GWGElp8kssQEv+5wYd7L9WOhU= github.com/cristalhq/jwt/v4 v4.0.2/go.mod h1:HnYraSNKDRag1DZP92rYHyrjyQHnVEHPNqesmzs+miQ= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/dave/jennifer v1.6.1 h1:T4T/67t6RAA5AIV6+NP8Uk/BIsXgDoqEowgycdQQLuk= github.com/dave/jennifer v1.6.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -281,6 +287,7 @@ github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPa github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -318,6 +325,7 @@ github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest 
v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -352,6 +360,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -373,9 +382,13 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= +github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.6/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -690,6 +703,8 @@ github.com/lestrrat-go/jwx v1.2.24/go.mod h1:zoNuZymNl5lgdcu6P7K6ie2QRll5HVfF4xw github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-flow-metrics v0.0.3 h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= @@ -769,6 +784,7 @@ github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae h1:O4SWKdcHVCvYqyDV+9CJA1fcDN2L11Bule0iFy3YlAI= github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -785,6 +801,7 @@ github.com/mr-tron/base58 v1.1.0/go.mod 
h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVq github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= @@ -839,6 +856,8 @@ github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7X github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs= github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -958,6 +977,7 @@ github.com/samber/lo v1.38.1/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXn github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -968,6 +988,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1032,6 +1053,7 @@ github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/teserakt-io/golang-ed25519 v0.0.0-20210104091850-3888c087a4c8 
h1:RBkacARv7qY5laaXGlF4wFB/tk5rnthhPb8oIBGoagY= @@ -1084,6 +1106,8 @@ github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4x github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c h1:GGsyl0dZ2jJgVT+VvWBf/cNijrHRhkrTjkmp5wg7li0= github.com/whyrusleeping/tar-utils v0.0.0-20180509141711-8c6c8ba81d5c/go.mod h1:xxcJeBb7SIUl/Wzkz1eVKJE/CB34YNrqX2TQI6jY9zs= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -1144,6 +1168,8 @@ go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 h1:hATJDiGtTPWglqQRlWUiT5df32bOu9AJV41djhfF4Ig= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0/go.mod h1:nkEFz9FW/KZC65rsd8yrHm4aBKa5STMpe4/Xb5+LG64= go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= @@ -1361,12 +1387,14 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1412,15 +1440,20 @@ golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1469,6 +1502,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools 
v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1669,6 +1703,7 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= @@ -1710,8 +1745,12 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= +gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/component/wallet-cli/internal/storage/mongodb/store.go b/component/wallet-cli/internal/storage/mongodb/store.go new file mode 100644 index 000000000..d759a0880 --- /dev/null +++ b/component/wallet-cli/internal/storage/mongodb/store.go @@ -0,0 +1,1682 @@ +/* +Copyright Scoir Inc, Gen Digital Inc. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +// Package mongodb implements a storage provider conforming to the storage interface in aries-framework-go. +// It is compatible with MongoDB v4.0.0, v4.2.8, and v5.0.0. It is also compatible with Amazon DocumentDB 4.0.0. +// It may be compatible with other versions, but they haven't been tested. 
+package mongodb + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/hyperledger/aries-framework-go/spi/storage" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + mongooptions "go.mongodb.org/mongo-driver/mongo/options" +) + +const ( + defaultTimeout = time.Second * 10 + defaultMaxIndexCreationConflictRetries = 3 + + invalidTagName = `"%s" is an invalid tag name since it contains one or more of the ` + + `following substrings: ":", "<=", "<", ">=", ">"` + invalidTagValue = `"%s" is an invalid tag value since it contains one or more of the ` + + `following substrings: ":", "<=", "<", ">=", ">"` + failCreateIndexesInMongoDBCollection = "failed to create indexes in MongoDB collection: %w" + + equalsExpressionTagNameOnlyLength = 1 + equalsExpressionTagNameAndValueLength = 2 + lessThanGreaterThanExpressionLength +) + +var ( + errInvalidQueryExpressionFormat = errors.New("invalid expression format. " + + "It must be in the following format: " + + "TagName:TagValue or TagName1:TagValue1&&TagName2:TagValue2. Tag values are optional. If using tag values," + + "<=, <, >=, or > may be used in place of the : to match a range of tag values") + + // errUnmarshalBytesIntoMap is used in the convertMarshalledValueToMap function to allow the generateDataWrapper + // function to differentiate between an unmarshal failure and other types of failures. + errUnmarshalBytesIntoMap = errors.New("failed to unmarshal bytes into map") +) + +type logger interface { + Infof(msg string, args ...interface{}) +} + +type defaultLogger struct { + logger *log.Logger +} + +func (d *defaultLogger) Infof(msg string, args ...interface{}) { + d.logger.Printf(msg, args...) 
+} + +type closer func(storeName string) + +type dataWrapper struct { + Key string `bson:"_id"` + Doc map[string]interface{} `bson:"doc,omitempty"` + Str string `bson:"str,omitempty"` + Bin []byte `bson:"bin,omitempty"` + Tags map[string]interface{} `bson:"tags,omitempty"` +} + +// Option represents an option for a MongoDB Provider. +type Option func(opts *Provider) + +// WithDBPrefix is an option for adding a prefix to all created database names. +// No prefix will be used by default. +func WithDBPrefix(dbPrefix string) Option { + return func(opts *Provider) { + opts.dbPrefix = dbPrefix + } +} + +// WithLogger is an option for specifying a custom logger. +// The standard Golang logger will be used by default. +func WithLogger(logger logger) Option { + return func(opts *Provider) { + opts.logger = logger + } +} + +// WithTimeout is an option for specifying the timeout for all calls to MongoDB. +// The timeout is 10 seconds by default. +func WithTimeout(timeout time.Duration) Option { + return func(opts *Provider) { + opts.timeout = timeout + } +} + +// WithMaxRetries is an option for specifying how many retries are allowed when there are certain transient errors +// from MongoDB. These transient errors can happen in two situations: +// 1. An index conflict error when setting indexes via the SetStoreConfig method from multiple MongoDB Provider +// objects that look at the same stores (which might happen if you have multiple running instances of a service). +// 2. If you're using MongoDB 4.0.0 (or DocumentDB 4.0.0), a "dup key" type of error when calling Store.Put or +// Store.Batch from multiple MongoDB Provider objects that look at the same stores. +// +// maxRetries must be > 0. If not set (or set to an invalid value), it will default to 3. 
+func WithMaxRetries(maxRetries uint64) Option { + return func(opts *Provider) { + opts.maxRetries = maxRetries + } +} + +// WithTimeBetweenRetries is an option for specifying how long to wait between retries when +// there are certain transient errors from MongoDB. These transient errors can happen in two situations: +// 1. An index conflict error when setting indexes via the SetStoreConfig method from multiple MongoDB Provider +// objects that look at the same stores (which might happen if you have multiple running instances of a service). +// 2. If you're using MongoDB 4.0.0 (or DocumentDB 4.0.0), a "dup key" type of error when calling Store.Put or +// Store.Batch multiple times in parallel on the same key. +// +// Defaults to two seconds if not set. +func WithTimeBetweenRetries(timeBetweenRetries time.Duration) Option { + return func(opts *Provider) { + opts.timeBetweenRetries = timeBetweenRetries + } +} + +// Provider represents a MongoDB/DocumentDB implementation of the storage.Provider interface. +type Provider struct { + client *mongo.Client + openStores map[string]*Store + dbPrefix string + lock sync.RWMutex + logger logger + timeout time.Duration + maxRetries uint64 + timeBetweenRetries time.Duration +} + +// NewProvider instantiates a new MongoDB Provider. +// connString is a connection string as defined in https://docs.mongodb.com/manual/reference/connection-string/. +// Note that options supported by the Go Mongo driver (and the names of them) may differ from the documentation above. +// Check the Go Mongo driver (go.mongodb.org/mongo-driver/mongo) to make sure the options you're specifying +// are supported and will be captured correctly. +// If using DocumentDB, the retryWrites option must be set to false in the connection string (retryWrites=false) in +// order for it to work. 
+func NewProvider(connString string, opts ...Option) (*Provider, error) { + p := &Provider{openStores: map[string]*Store{}} + + setOptions(opts, p) + + client, err := mongo.NewClient(mongooptions.Client().ApplyURI(connString)) + if err != nil { + return nil, fmt.Errorf("failed to create a new MongoDB client: %w", err) + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + + err = client.Connect(ctxWithTimeout) + if err != nil { + return nil, fmt.Errorf("failed to connect to MongoDB: %w", err) + } + + p.client = client + + return p, nil +} + +// OpenStore opens a Store with the given name and returns a handle. +// If the underlying database for the given name has never been created before, then it is created. +// Store names are not case-sensitive. If name is blank, then an error will be returned. +func (p *Provider) OpenStore(name string) (storage.Store, error) { + if name == "" { + return nil, fmt.Errorf("store name cannot be empty") + } + + name = strings.ToLower(p.dbPrefix + name) + + p.lock.Lock() + defer p.lock.Unlock() + + openStore, ok := p.openStores[name] + if ok { + return openStore, nil + } + + newStore := &Store{ + // The storage interface doesn't have the concept of a nested database, so we have no real use for the + // collection abstraction MongoDB uses. Since we have to use at least one collection, we keep the collection + // name as short as possible to avoid hitting the index size limit. + coll: p.getCollectionHandle(name), + name: name, + logger: p.logger, + close: p.removeStore, + timeout: p.timeout, + maxRetries: p.maxRetries, + timeBetweenRetries: p.timeBetweenRetries, + } + + p.openStores[name] = newStore + + return newStore, nil +} + +// SetStoreConfig sets the configuration on a Store. +// Indexes are created based on the tag names in config. This allows the Store.Query method to operate faster. 
+// Existing tag names/indexes in the Store that are not in the config passed in here will be removed. +// The Store must already be open in this provider from a prior call to OpenStore. The name parameter cannot be blank. +func (p *Provider) SetStoreConfig(storeName string, config storage.StoreConfiguration) error { + for _, tagName := range config.TagNames { + if strings.Contains(tagName, ":") { + return fmt.Errorf(invalidTagName, tagName) + } + } + + storeName = strings.ToLower(p.dbPrefix + storeName) + + openStore, found := p.openStores[storeName] + if !found { + return storage.ErrStoreNotFound + } + + var attemptsMade int + + err := backoff.Retry(func() error { + attemptsMade++ + + err := p.setIndexes(openStore, config) + if err != nil { + // If there are multiple MongoDB Providers trying to set Store configurations, it's possible + // to get an error. In cases where those multiple MongoDB providers are trying + // to set the exact same Store configuration, retrying here allows them to succeed without failing + // unnecessarily. + if isIndexConflictErrorMessage(err) { + p.logger.Infof("[Store name: %s] Attempt %d - error while setting indexes. "+ + "This can happen if multiple MongoDB providers set the store configuration at the "+ + "same time. If there are remaining retries, this operation will be tried again after %s. "+ + "Underlying error message: %s", + storeName, attemptsMade, p.timeBetweenRetries.String(), err.Error()) + + // The error below isn't marked using backoff.Permanent, so it'll only be seen if the retry limit + // is reached. + return fmt.Errorf("failed to set indexes after %d attempts. This storage provider may "+ + "need to be started with a higher max retry limit and/or higher time between retries. "+ + "Underlying error message: %w", attemptsMade, err) + } + + // This is an unexpected error. 
+ return backoff.Permanent(fmt.Errorf("failed to set indexes: %w", err)) + } + + p.logger.Infof("[Store name: %s] Attempt %d - successfully set indexes.", + storeName, attemptsMade) + + return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(p.timeBetweenRetries), p.maxRetries)) + if err != nil { + return err + } + + return nil +} + +// GetStoreConfig gets the current Store configuration. +// If the underlying database for the given name has never been +// created by a call to OpenStore at some point, then an error wrapping ErrStoreNotFound will be returned. This +// method will not open a Store in the Provider. +// If name is blank, then an error will be returned. +func (p *Provider) GetStoreConfig(name string) (storage.StoreConfiguration, error) { + name = strings.ToLower(p.dbPrefix + name) + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + + databaseNames, err := p.client.ListDatabaseNames(ctxWithTimeout, bson.D{{Key: "name", Value: name}}) + if err != nil { + return storage.StoreConfiguration{}, fmt.Errorf("failed to determine if the underlying database "+ + "exists for %s: %w", name, err) + } + + if len(databaseNames) == 0 { + // MongoDB defers the creation of the collection until the first actual storage call. + // In the case where someone calls OpenStore and then GetStoreConfig immediately, MongoDB will report that + // the database doesn't exist, but logically from the point of view of this object it does indeed exist, + // so we shouldn't return an ErrStoreNotFound. 
+ _, exists := p.openStores[name] + if !exists { + return storage.StoreConfiguration{}, storage.ErrStoreNotFound + } + + return storage.StoreConfiguration{}, nil + } + + existingIndexedTagNames, err := p.getExistingIndexedTagNames(p.getCollectionHandle(name)) + if err != nil { + return storage.StoreConfiguration{}, fmt.Errorf("failed to get existing indexed tag names: %w", err) + } + + return storage.StoreConfiguration{TagNames: existingIndexedTagNames}, nil +} + +// GetOpenStores returns all Stores currently open in this Provider. +func (p *Provider) GetOpenStores() []storage.Store { + p.lock.RLock() + defer p.lock.RUnlock() + + openStores := make([]storage.Store, len(p.openStores)) + + var counter int + + for _, openStore := range p.openStores { + openStores[counter] = openStore + counter++ + } + + return openStores +} + +// Close closes all stores created under this Store provider. +func (p *Provider) Close() error { + p.lock.RLock() + + openStoresSnapshot := make([]*Store, len(p.openStores)) + + var counter int + + for _, openStore := range p.openStores { + openStoresSnapshot[counter] = openStore + counter++ + } + p.lock.RUnlock() + + for _, openStore := range openStoresSnapshot { + err := openStore.Close() + if err != nil { + return fmt.Errorf(`failed to close open store with name "%s": %w`, openStore.name, err) + } + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + + err := p.client.Disconnect(ctxWithTimeout) + if err != nil { + if err.Error() == "client is disconnected" { + return nil + } + + return fmt.Errorf("failed to disconnect from MongoDB: %w", err) + } + + return nil +} + +// Ping verifies whether the MongoDB client can successfully connect to the deployment specified by +// the connection string used in the NewProvider call. 
+func (p *Provider) Ping() error { + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + + return p.client.Ping(ctxWithTimeout, nil) +} + +// CreateCustomIndexes allows for any custom indexes to be created in MongoDB based on the given index models. +// Intended for use alongside the Store.PutAsJSON, Store.GetAsRawMap and Iterator.ValueAsRawMap methods. +func (p *Provider) CreateCustomIndexes(storeName string, models ...mongo.IndexModel) error { + storeName = strings.ToLower(p.dbPrefix + storeName) + + store, exists := p.openStores[storeName] + if !exists { + return storage.ErrStoreNotFound + } + + err := p.createIndexes(store, models) + if err != nil { + return fmt.Errorf(failCreateIndexesInMongoDBCollection, err) + } + + return nil +} + +func (p *Provider) removeStore(name string) { + p.lock.Lock() + defer p.lock.Unlock() + + _, ok := p.openStores[name] + if ok { + delete(p.openStores, name) + } +} + +const ( + collectionPrefix = "cli_mongo_generic-" +) + +func (p *Provider) getCollectionHandle(name string) *mongo.Collection { + return p.client.Database(name).Collection("c") +} + +func (p *Provider) setIndexes(openStore *Store, config storage.StoreConfiguration) error { + tagNamesNeedIndexCreation, err := p.determineTagNamesNeedIndexCreation(openStore, config) + if err != nil { + return err + } + + if len(tagNamesNeedIndexCreation) > 0 { + models := make([]mongo.IndexModel, len(tagNamesNeedIndexCreation)) + + for i, tagName := range tagNamesNeedIndexCreation { + indexOptions := mongooptions.Index() + indexOptions.SetName(tagName) + + models[i] = mongo.IndexModel{ + Keys: bson.D{{Key: fmt.Sprintf("tags.%s", tagName), Value: 1}}, + Options: indexOptions, + } + } + + err = p.createIndexes(openStore, models) + if err != nil { + return err + } + } + + return nil +} + +func (p *Provider) determineTagNamesNeedIndexCreation(openStore *Store, + config storage.StoreConfiguration) ([]string, error) { + existingIndexedTagNames, 
err := p.getExistingIndexedTagNames(openStore.coll) + if err != nil { + return nil, fmt.Errorf("failed to get existing indexed tag names: %w", err) + } + + tagNameIndexesAlreadyConfigured := make(map[string]struct{}) + + for _, existingIndexedTagName := range existingIndexedTagNames { + var existingTagIsInNewConfig bool + + for _, tagName := range config.TagNames { + if existingIndexedTagName == tagName { + existingTagIsInNewConfig = true + tagNameIndexesAlreadyConfigured[tagName] = struct{}{} + + p.logger.Infof("[Store name (includes prefix, if any): %s] Skipping index creation for %s "+ + "since the index already exists.", openStore.name, tagName) + + break + } + } + + // If the new Store configuration doesn't have the existing index (tag) defined, then we will delete it + if !existingTagIsInNewConfig { + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + + _, errDrop := openStore.coll.Indexes().DropOne(ctxWithTimeout, existingIndexedTagName) + if errDrop != nil { + cancel() + + return nil, fmt.Errorf("failed to remove index for %s: %w", existingIndexedTagName, errDrop) + } + + cancel() + } + } + + var tagNamesNeedIndexCreation []string + + for _, tag := range config.TagNames { + _, indexAlreadyCreated := tagNameIndexesAlreadyConfigured[tag] + if !indexAlreadyCreated { + tagNamesNeedIndexCreation = append(tagNamesNeedIndexCreation, tag) + } + } + + return tagNamesNeedIndexCreation, nil +} + +func (p *Provider) getExistingIndexedTagNames(collection *mongo.Collection) ([]string, error) { + indexesCursor, err := p.getIndexesCursor(collection) + if err != nil { + return nil, err + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + + var results []bson.M + + err = indexesCursor.All(ctxWithTimeout, &results) + if err != nil { + return nil, fmt.Errorf("failed to get all results from indexes cursor") + } + + if results == nil { + return nil, nil + } + + existingIndexedTagNames := 
make([]string, len(results)-1) + + var counter int + + for _, result := range results { + indexNameRaw, exists := result["name"] + if !exists { + return nil, errors.New(`index data is missing the "key" field`) + } + + indexName, ok := indexNameRaw.(string) + if !ok { + return nil, errors.New(`index name is of unexpected type`) + } + + // The _id_ index is a built-in index in MongoDB. It wasn't one that can be set using SetStoreConfig, + // so we omit it here. + if indexName == "_id_" { + continue + } + + existingIndexedTagNames[counter] = indexName + + counter++ + } + + return existingIndexedTagNames, nil +} + +func (p *Provider) getIndexesCursor(collection *mongo.Collection) (*mongo.Cursor, error) { + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + + indexesCursor, err := collection.Indexes().List(ctxWithTimeout) + if err != nil { + return nil, fmt.Errorf("failed to get list of indexes from MongoDB: %w", err) + } + + return indexesCursor, nil +} + +func (p *Provider) createIndexes(openStore *Store, models []mongo.IndexModel) error { + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), p.timeout) + defer cancel() + + _, err := openStore.coll.Indexes().CreateMany(ctxWithTimeout, models) + if err != nil { + return fmt.Errorf(failCreateIndexesInMongoDBCollection, err) + } + + return nil +} + +// Store represents a MongoDB/DocumentDB implementation of the storage.Store interface. +type Store struct { + name string + logger logger + coll *mongo.Collection + close closer + timeout time.Duration + maxRetries uint64 + timeBetweenRetries time.Duration +} + +// Put stores the key + value pair along with the (optional) tags. +// If tag values are valid int32 or int64, they will be stored as integers in MongoDB, so we can sort numerically later. +// If storing a JSON value, then any key names (within the JSON) cannot contain "`" characters. This is because we +// use it as a replacement for "." 
characters, which are not valid in DocumentDB as JSON key names. +func (s *Store) Put(key string, value []byte, tags ...storage.Tag) error { + err := validatePutInput(key, value, tags) + if err != nil { + return err + } + + data, err := generateDataWrapper(key, value, tags) + if err != nil { + return err + } + + return s.executeReplaceOneCommand(key, data) +} + +// PutAsJSON stores the given key and value. +// Value must be a struct with exported fields and proper json tags or a map. It will get marshalled before being +// converted to the format needed by the MongoDB driver. Value is stored directly in a MongoDB document without +// wrapping, with key being used as the _id field. Data stored this way must be retrieved using the GetAsRawMap method. +// When querying for this data, use the QueryCustom method, and when retrieving from the iterator use the +// iterator.ValueAsRawMap method. +func (s *Store) PutAsJSON(key string, value interface{}) error { + data, err := PrepareDataForBSONStorage(value) + if err != nil { + return err + } + + return s.executeReplaceOneCommand(key, data) +} + +// Get fetches the value associated with the given key. +// If key cannot be found, then an error wrapping ErrDataNotFound will be returned. +// If key is empty, then an error will be returned. +func (s *Store) Get(key string) ([]byte, error) { + result, err := s.runFindOneCommand(key) + if err != nil { + return nil, err + } + + _, value, err := getKeyAndValueFromMongoDBResult(result) + if err != nil { + return nil, fmt.Errorf("failed to get value from MongoDB result: %w", err) + } + + return value, nil +} + +// GetAsRawMap fetches the full MongoDB JSON document stored with the given id (_id field in MongoDB). +// The document is returned as a map (which includes the _id field). 
+func (s *Store) GetAsRawMap(id string) (map[string]interface{}, error) { + result, err := s.runFindOneCommand(id) + if err != nil { + return nil, err + } + + return getValueAsRawMapFromMongoDBResult(result) +} + +// GetTags fetches all tags associated with the given key. +// If key cannot be found, then an error wrapping ErrDataNotFound will be returned. +// If key is empty, then an error will be returned. +func (s *Store) GetTags(key string) ([]storage.Tag, error) { + result, err := s.runFindOneCommand(key) + if err != nil { + return nil, err + } + + tags, err := getTagsFromMongoDBResult(result) + if err != nil { + return nil, fmt.Errorf("failed to get tags from MongoDB result: %w", err) + } + + return tags, nil +} + +// GetBulk fetches the values associated with the given keys. +// If no data exists under a given key, then a nil []byte is returned for that value. It is not considered an error. +// Depending on the implementation, this method may be faster than calling Get for each key individually. +// If any of the given keys are empty, then an error will be returned. +// As of writing, aries-framework-go code does not use this, but it may be useful for custom solutions. +func (s *Store) GetBulk(keys ...string) ([][]byte, error) { + if len(keys) == 0 { + return nil, errors.New("keys slice must contain at least one key") + } + + for _, key := range keys { + if key == "" { + return nil, errors.New("key cannot be empty") + } + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + cursor, err := s.coll.Find(ctxWithTimeout, bson.M{"_id": bson.D{ + {Key: "$in", Value: keys}, + }}) + if err != nil { + return nil, fmt.Errorf("failed to run Find command in MongoDB: %w", err) + } + + allValues, err := s.collectBulkGetResults(keys, cursor) + if err != nil { + return nil, err + } + + return allValues, nil +} + +// GetBulkAsRawMap fetches the values associated with the given keys and returns the documents (as maps). 
+// If no data exists under a given key, then nil is returned for that value. It is not considered an error. +// Depending on the implementation, this method may be faster than calling Get for each key individually. +// If any of the given keys are empty, then an error will be returned. +func (s *Store) GetBulkAsRawMap(keys ...string) ([]map[string]interface{}, error) { + if len(keys) == 0 { + return nil, errors.New("keys slice must contain at least one key") + } + + for _, key := range keys { + if key == "" { + return nil, errors.New("key cannot be empty") + } + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + cursor, err := s.coll.Find(ctxWithTimeout, bson.M{"_id": bson.D{ + {Key: "$in", Value: keys}, + }}) + if err != nil { + return nil, fmt.Errorf("failed to run Find command in MongoDB: %w", err) + } + + allValues, err := s.collectBulkGetResultsAsRawMap(keys, cursor) + if err != nil { + return nil, err + } + + return allValues, nil +} + +// Query does a query for data as defined by the documentation in storage.Store (the interface). +// This implementation also supports querying for data tagged with multiple tag name + value pairs (using AND logic). +// To do this, separate the tag name + value pairs using &&. You can still omit one or both of the tag values +// in order to indicate that you want any data tagged with the tag name, regardless of tag value. +// For example, TagName1:TagValue1&&TagName2:TagValue2:...:TagNameN:TagValueN will return only data that has been +// tagged with all pairs. See testQueryWithMultipleTags in store_test.go for more examples of querying using multiple +// tags. If the tag you're using has tag values that are integers, then you can use the <, <=, >, >= operators instead +// of : to get a range of matching data. For example, TagName>3 will return any data tagged with a tag named TagName +// that has a value greater than 3. 
+// It's recommended to set up an index using the Provider.SetStoreConfig method in order to speed up queries. +// TODO (#146) Investigate compound indexes and see if they may be useful for queries with sorts and/or for queries +// +// with multiple tags. +func (s *Store) Query(expression string, options ...storage.QueryOption) (storage.Iterator, error) { + if expression == "" { + return &iterator{}, errInvalidQueryExpressionFormat + } + + filter, err := PrepareFilter(strings.Split(expression, "&&"), false) + if err != nil { + return nil, err + } + + findOptions := s.CreateMongoDBFindOptions(options, false) + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + cursor, err := s.coll.Find(ctxWithTimeout, filter, findOptions) + if err != nil { + return nil, fmt.Errorf("failed to run Find command in MongoDB: %w", err) + } + + return &iterator{ + cursor: cursor, + coll: s.coll, + filter: filter, + timeout: s.timeout, + }, nil +} + +// Iterator represents a MongoDB/DocumentDB implementation of the storage.Iterator interface. +type Iterator interface { + storage.Iterator + + ValueAsRawMap() (map[string]interface{}, error) +} + +// QueryCustom queries for data using the MongoDB find command. The given filter and options are passed directly to the +// driver. Intended for use alongside the Provider.CreateCustomIndexes, Store.PutAsJSON, and +// Iterator.ValueAsRawMap methods. +func (s *Store) QueryCustom(filter interface{}, options ...*mongooptions.FindOptions) (Iterator, error) { + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + cursor, err := s.coll.Find(ctxWithTimeout, filter, options...) 
+ if err != nil { + return nil, fmt.Errorf("failed to run Find command in MongoDB: %w", err) + } + + return &iterator{ + cursor: cursor, + coll: s.coll, + filter: filter, + timeout: s.timeout, + customQuery: true, + }, nil +} + +// Delete deletes the value (and all tags) associated with key. +func (s *Store) Delete(key string) error { + if key == "" { + return errors.New("key is mandatory") + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + _, err := s.coll.DeleteOne(ctxWithTimeout, bson.M{"_id": key}) + if err != nil { + return fmt.Errorf("failed to run DeleteOne command in MongoDB: %w", err) + } + + return err +} + +// Batch performs multiple Put and/or Delete operations in order. +// If storing a JSON value, then any key names (within the JSON) cannot contain "`" characters. This is because we +// use it as a replacement for "." characters, which are not valid in DocumentDB as JSON key names. +// Put operations can be sped up by making use of the storage.PutOptions.IsNewKey option for any keys that you know +// for sure do not already exist in the database. If this option is used and the key does exist, then this method will +// return an error. 
+func (s *Store) Batch(operations []storage.Operation) error { + if len(operations) == 0 { + return errors.New("batch requires at least one operation") + } + + for _, operation := range operations { + if operation.Key == "" { + return errors.New("key cannot be empty") + } + } + + models := make([]mongo.WriteModel, len(operations)) + + var atLeastOneInsertOneModel bool + + for i, operation := range operations { + var err error + + var isInsertOneModel bool + + models[i], isInsertOneModel, err = generateModelForBulkWriteCall(operation) + if err != nil { + return err + } + + if isInsertOneModel { + atLeastOneInsertOneModel = true + } + } + + return s.executeBulkWriteCommand(models, atLeastOneInsertOneModel, nil) +} + +// BulkWrite executes the mongoDB BulkWrite command using the given WriteModels and BulkWriteOptions. +func (s *Store) BulkWrite(models []mongo.WriteModel, opts ...*mongooptions.BulkWriteOptions) error { + var atLeastOneInsertOneModel bool + + for _, model := range models { + _, isInsertOneModel := model.(*mongo.InsertOneModel) + if isInsertOneModel { + atLeastOneInsertOneModel = true + + break + } + } + + return s.executeBulkWriteCommand(models, atLeastOneInsertOneModel, opts) +} + +// Flush doesn't do anything since this store type doesn't queue values. +func (s *Store) Flush() error { + return nil +} + +// Close removes this Store from the parent Provider's list of open stores. It does not close this Store's connection +// to the database, since it's shared across stores. To close the connection you must call Provider.Close. 
+func (s *Store) Close() error { + s.close(s.name) + + return nil +} + +func (s *Store) executeReplaceOneCommand(key string, value interface{}) error { + opts := mongooptions.ReplaceOptions{} + opts.SetUpsert(true) + + var attemptsMade int + + return backoff.Retry(func() error { + attemptsMade++ + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + _, err := s.coll.ReplaceOne(ctxWithTimeout, bson.M{"_id": key}, value, &opts) + if err != nil { + // If using MongoDB 4.0.0 (or DocumentDB 4.0.0), and this is called multiple times in parallel on the + // same key, then it's possible to get a transient error here. We need to retry in this case. + if strings.Contains(err.Error(), "duplicate key error collection") { + s.logger.Infof(`[Store name: %s] Attempt %d - error while storing data under key "%s". `+ + "This can happen if there are multiple calls in parallel to Store data under the same key. "+ + "If there are remaining retries, this operation will be tried again after %s. "+ + "Underlying error message: %s", s.name, attemptsMade, key, s.timeBetweenRetries.String(), + err.Error()) + + // The error below isn't marked using backoff.Permanent, so it'll only be seen if the retry limit + // is reached. + return fmt.Errorf("failed to store data after %d attempts. This storage provider may "+ + "need to be started with a higher max retry limit and/or higher time between retries. "+ + "Underlying error message: %w", attemptsMade, err) + } + + // This is an unexpected error. 
+ return backoff.Permanent(fmt.Errorf("failed to run ReplaceOne command in MongoDB: %w", err)) + } + + return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(s.timeBetweenRetries), s.maxRetries)) +} + +func (s *Store) runFindOneCommand(id string) (*mongo.SingleResult, error) { + if id == "" { + return nil, errors.New("key is mandatory") + } + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + result := s.coll.FindOne(ctxWithTimeout, bson.M{"_id": id}) + if errors.Is(result.Err(), mongo.ErrNoDocuments) { + return nil, storage.ErrDataNotFound + } else if result.Err() != nil { + return nil, fmt.Errorf("failed to run FindOne command in MongoDB: %w", result.Err()) + } + + return result, nil +} + +func (s *Store) collectBulkGetResults(keys []string, cursor *mongo.Cursor) ([][]byte, error) { + allValues := make([][]byte, len(keys)) + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + for cursor.Next(ctxWithTimeout) { + key, value, err := getKeyAndValueFromMongoDBResult(cursor) + if err != nil { + return nil, fmt.Errorf("failed to get value from MongoDB result: %w", err) + } + + for i := 0; i < len(keys); i++ { + if key == keys[i] { + allValues[i] = value + + break + } + } + } + + return allValues, nil +} + +func (s *Store) collectBulkGetResultsAsRawMap(keys []string, cursor *mongo.Cursor) ([]map[string]interface{}, error) { + allValues := make([]map[string]interface{}, len(keys)) + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + for cursor.Next(ctxWithTimeout) { + key, value, err := getKeyAndRawMapFromMongoDBResult(cursor) + if err != nil { + return nil, fmt.Errorf("failed to get value from MongoDB result: %w", err) + } + + for i := 0; i < len(keys); i++ { + if key == keys[i] { + allValues[i] = value + + break + } + } + } + + return allValues, nil +} + +func (s *Store) executeBulkWriteCommand(models 
[]mongo.WriteModel, atLeastOneInsertOneModel bool, + opts []*mongooptions.BulkWriteOptions) error { + var attemptsMade int + + return backoff.Retry(func() error { + attemptsMade++ + + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), s.timeout) + defer cancel() + + _, err := s.coll.BulkWrite(ctxWithTimeout, models, opts...) + if err != nil { + // If using MongoDB 4.0.0 (or DocumentDB 4.0.0), and this is called multiple times in parallel on the + // same key(s), then it's possible to get a transient error here. We need to retry in this case. + if strings.Contains(err.Error(), "duplicate key error collection") { + // If the IsNewKey optimization is being used, then we generate a more informative log message and + // error. + + var errorReason string + + var errDuplicateKey error + + if atLeastOneInsertOneModel { + errorReason = "Either an InsertOne model is being used for a key that " + + "already exists in the database, or, if using MongoDB 4.0.0, then this may be a transient " + + "error due to another call storing data under the same key at the same time." + + // The "ErrDuplicateKey" error from the storage interface is used to indicate a failure due to + // the IsNewKey flag being used for a key that isn't new. A caller can check for this using + // errors.Is(). + errDuplicateKey = storage.ErrDuplicateKey + } else { + errorReason = "If using MongoDB 4.0.0, then this may be a transient " + + "error due to another call storing data under the same key at the same time." + + // While the text of this error matches the text from storage.ErrDuplicateKey, we don't use that + // specific error here since the meaning of storage.ErrDuplicateKey is specifically tied to the + // usage of the IsNewKey optimization. + errDuplicateKey = errors.New("duplicate key") + } + + s.logger.Infof("[Store name: %s] Attempt %d - %s while performing batch "+ + " operations. %s If there are remaining retries, the batch operations will be tried again "+ + "after %s. 
Underlying error message: %s", s.name, attemptsMade, storage.ErrDuplicateKey, + errorReason, s.timeBetweenRetries.String(), err.Error()) + + // The error below isn't marked using backoff.Permanent, so it'll only be seen if the retry limit + // is reached. + return fmt.Errorf("failed to perform batch operations after %d attempts: %w. "+ + "%s Underlying error message: %s", attemptsMade, errDuplicateKey, errorReason, + err.Error()) + } + + // This is an unexpected error. + return backoff.Permanent(fmt.Errorf("failed to run BulkWrite command in MongoDB: %w", err)) + } + + return nil + }, backoff.WithMaxRetries(backoff.NewConstantBackOff(s.timeBetweenRetries), s.maxRetries)) +} + +// CreateMongoDBFindOptions converts the given storage options into MongoDB options. +func (s *Store) CreateMongoDBFindOptions(options []storage.QueryOption, isJSONQuery bool) *mongooptions.FindOptions { + queryOptions := getQueryOptions(options) + + findOptions := mongooptions.Find() + + if queryOptions.PageSize > 0 || queryOptions.InitialPageNum > 0 { + findOptions = mongooptions.Find() + + findOptions.SetBatchSize(int32(queryOptions.PageSize)) + + if queryOptions.PageSize > 0 && queryOptions.InitialPageNum > 0 { + findOptions.SetSkip(int64(queryOptions.InitialPageNum * queryOptions.PageSize)) + } + } + + if queryOptions.SortOptions != nil { + mongoDBSortOrder := 1 + if queryOptions.SortOptions.Order == storage.SortDescending { + mongoDBSortOrder = -1 + } + + var key string + + if isJSONQuery { + key = queryOptions.SortOptions.TagName + } else { + key = fmt.Sprintf("tags.%s", queryOptions.SortOptions.TagName) + } + + findOptions.SetSort(bson.D{{ + Key: key, + Value: mongoDBSortOrder, + }}) + } + + return findOptions +} + +// iterator represents a MongoDB/DocumentDB implementation of the storage.Iterator interface. 
+type iterator struct { + cursor *mongo.Cursor + coll *mongo.Collection + filter interface{} + timeout time.Duration + customQuery bool +} + +// Next moves the pointer to the next entry in the iterator. +// Note that it must be called before accessing the first entry. +// It returns false if the iterator is exhausted - this is not considered an error. +func (i *iterator) Next() (bool, error) { + ctxWithTimeout, cancel := context.WithTimeout(context.Background(), i.timeout) + defer cancel() + + return i.cursor.Next(ctxWithTimeout), nil +} + +// Key returns the key of the current entry. +func (i *iterator) Key() (string, error) { + key, _, err := getKeyAndValueFromMongoDBResult(i.cursor) + if err != nil { + return "", fmt.Errorf("failed to get key from MongoDB result: %w", err) + } + + return key, nil +} + +// Value returns the value of the current entry. +func (i *iterator) Value() ([]byte, error) { + _, value, err := getKeyAndValueFromMongoDBResult(i.cursor) + if err != nil { + return nil, fmt.Errorf("failed to get value from MongoDB result: %w", err) + } + + return value, nil +} + +// ValueAsRawMap returns the full MongoDB JSON document of the current entry. +// The document is returned as a map (which includes the _id field). +func (i *iterator) ValueAsRawMap() (map[string]interface{}, error) { + return getValueAsRawMapFromMongoDBResult(i.cursor) +} + +// Tags returns the tags associated with the key of the current entry. +// As of writing, aries-framework-go code does not use this, but it may be useful for custom solutions. +func (i *iterator) Tags() ([]storage.Tag, error) { + tags, err := getTagsFromMongoDBResult(i.cursor) + if err != nil { + return nil, fmt.Errorf("failed to get tags from MongoDB result: %w", err) + } + + return tags, nil +} + +// TODO (#147) Investigate using aggregates to get total items without doing a separate query. 
+
+// TotalItems returns a count of the number of entries (key + value + tags triplets) matched by the query
+// that generated this iterator. This count is not affected by the page settings used (i.e. the count is of all
+// results as if you queried starting from the first page and with an unlimited page size).
+// Depending on the storage implementation, you may need to ensure that the TagName used in the query is in the
+// Store's StoreConfiguration before trying to call this method (or it may be optional, but recommended).
+// As of writing, aries-framework-go code does not use this, but it may be useful for custom solutions.
+func (i *iterator) TotalItems() (int, error) {
+	ctxWithTimeout, cancel := context.WithTimeout(context.Background(), i.timeout)
+	defer cancel()
+
+	totalItems, err := i.coll.CountDocuments(ctxWithTimeout, i.filter)
+	if err != nil {
+		return -1, fmt.Errorf("failed to get document count from MongoDB: %w", err)
+	}
+
+	return int(totalItems), nil
+}
+
+// Close closes this iterator object, freeing resources.
+func (i *iterator) Close() error {
+	ctxWithTimeout, cancel := context.WithTimeout(context.Background(), i.timeout)
+	defer cancel()
+
+	return i.cursor.Close(ctxWithTimeout)
+}
+
+func setOptions(opts []Option, p *Provider) {
+	for _, opt := range opts {
+		opt(p)
+	}
+
+	if p.logger == nil {
+		p.logger = &defaultLogger{
+			log.New(os.Stdout, "MongoDB-Provider ", log.Ldate|log.Ltime|log.LUTC),
+		}
+	}
+
+	if p.timeout == 0 {
+		p.timeout = defaultTimeout
+	}
+
+	if p.maxRetries < 1 {
+		p.maxRetries = defaultMaxIndexCreationConflictRetries
+	}
+}
+
+func isIndexConflictErrorMessage(err error) bool {
+	// DocumentDB may return any one of these error messages.
+	documentDBPossibleErrMsg1 := "Non-unique"
+	documentDBPossibleErrMsg2 := "Existing index build in progress on the same collection. " +
+		"Collection is limited to a single index build at a time."
+ documentDBPossibleErrMsg3 := "EOF" + // MongoDB 5.0.0 may return this error message. + mongoDB500PossibleErrMsg := "incomplete read of message header" + + if strings.Contains(err.Error(), documentDBPossibleErrMsg1) || + strings.Contains(err.Error(), documentDBPossibleErrMsg2) || + strings.Contains(err.Error(), documentDBPossibleErrMsg3) || + strings.Contains(err.Error(), mongoDB500PossibleErrMsg) { + return true + } + + return false +} + +func validatePutInput(key string, value []byte, tags []storage.Tag) error { + if key == "" { + return errors.New("key cannot be empty") + } + + if value == nil { + return errors.New("value cannot be nil") + } + + for _, tag := range tags { + if strings.Contains(tag.Name, ":") { + return fmt.Errorf(invalidTagName, tag.Name) + } + + if strings.Contains(tag.Value, ":") { + return fmt.Errorf(invalidTagValue, tag.Value) + } + + if strings.Contains(tag.Name, "<") { // This also handles the <= case. + return fmt.Errorf(invalidTagName, tag.Name) + } + + if strings.Contains(tag.Value, "<") { // This also handles the <= case. + return fmt.Errorf(invalidTagValue, tag.Value) + } + + if strings.Contains(tag.Name, ">") { // This also handles the >= case. + return fmt.Errorf(invalidTagName, tag.Name) + } + + if strings.Contains(tag.Value, ">") { // This also handles the >= case. + return fmt.Errorf(invalidTagValue, tag.Value) + } + } + + return nil +} + +func convertTagSliceToMap(tagSlice []storage.Tag) (map[string]interface{}, error) { + tagsMap := make(map[string]interface{}) + + for _, tag := range tagSlice { + _, exists := tagsMap[tag.Name] + if exists { + return nil, fmt.Errorf("tag name %s appears in more than one tag. A single key-value pair cannot "+ + "have multiple tags that share the same tag name", tag.Name) + } + + tagsMap[tag.Name] = convertToIntIfPossible(tag.Value) + } + + return tagsMap, nil +} + +// If possible, converts value to an int and returns it. +// Otherwise, it returns value as a string, untouched. 
+func convertToIntIfPossible(value string) interface{} { + valueAsInt, err := strconv.Atoi(value) + if err != nil { + return value + } + + return valueAsInt +} + +func convertTagMapToSlice(tagMap map[string]interface{}) []storage.Tag { + tagsSlice := make([]storage.Tag, len(tagMap)) + + var counter int + + for tagName, tagValue := range tagMap { + tagsSlice[counter] = storage.Tag{ + Name: tagName, + Value: fmt.Sprintf("%v", tagValue), + } + + counter++ + } + + return tagsSlice +} + +type decoder interface { + Decode(interface{}) error +} + +func getKeyAndValueFromMongoDBResult(decoder decoder) (key string, value []byte, err error) { + data, errGetDataWrapper := getDataWrapperFromMongoDBResult(decoder) + if errGetDataWrapper != nil { + return "", nil, fmt.Errorf("failed to get data wrapper from MongoDB result: %w", errGetDataWrapper) + } + + if data.Doc != nil { + unescapedMap := unescapeMapForDocumentDB(data.Doc) + + dataBytes, errMarshal := json.Marshal(unescapedMap) + if errMarshal != nil { + return "", nil, fmt.Errorf("failed to marshal value into bytes: %w", errMarshal) + } + + return data.Key, dataBytes, nil + } + + if data.Bin != nil { + return data.Key, data.Bin, nil + } + + valueBytes, err := json.Marshal(data.Str) + if err != nil { + return "", nil, fmt.Errorf("marshal string value: %w", err) + } + + return data.Key, valueBytes, nil +} + +func getKeyAndRawMapFromMongoDBResult(decoder decoder) (key string, doc map[string]interface{}, err error) { + doc, errGetDataWrapper := getValueAsRawMapFromMongoDBResult(decoder) + if errGetDataWrapper != nil { + return "", nil, fmt.Errorf("failed to get data wrapper from MongoDB result: %w", errGetDataWrapper) + } + + id, ok := doc["_id"] + if !ok { + return "", nil, fmt.Errorf("no _id field in document") + } + + key, ok = id.(string) + if !ok { + return "", nil, fmt.Errorf("_id field in document is not a string") + } + + return key, doc, nil +} + +func getTagsFromMongoDBResult(decoder decoder) ([]storage.Tag, error) { 
+ data, err := getDataWrapperFromMongoDBResult(decoder) + if err != nil { + return nil, fmt.Errorf("failed to get data wrapper from MongoDB result: %w", err) + } + + return convertTagMapToSlice(data.Tags), nil +} + +// getDataWrapperFromMongoDBResult unmarshals and returns a dataWrapper from the MongoDB result. +func getDataWrapperFromMongoDBResult(decoder decoder) (*dataWrapper, error) { + data := &dataWrapper{} + + if err := decoder.Decode(data); err != nil { + return nil, fmt.Errorf("failed to decode data from MongoDB: %w", err) + } + + return data, nil +} + +func getValueAsRawMapFromMongoDBResult(decoder decoder) (map[string]interface{}, error) { + data := make(map[string]interface{}) + + if err := decoder.Decode(data); err != nil { + return nil, fmt.Errorf("failed to decode data from MongoDB: %w", err) + } + + return data, nil +} + +func getQueryOptions(options []storage.QueryOption) storage.QueryOptions { + var queryOptions storage.QueryOptions + + for _, option := range options { + if option != nil { + option(&queryOptions) + } + } + + if queryOptions.InitialPageNum < 0 { + queryOptions.InitialPageNum = 0 + } + + return queryOptions +} + +// PrepareFilter converts the expression into a MongoDB filter. +func PrepareFilter(expressions []string, isJSONQuery bool) (bson.D, error) { + operands := make(bson.D, len(expressions)) + + for i, exp := range expressions { + operand, err := prepareSingleOperand(exp, isJSONQuery) + if err != nil { + return nil, err + } + + operands[i] = operand + } + + // When the bson.D below gets serialized, it'll be comma separated. + // MongoDB treats a comma separated list of expression as an implicit AND operation. 
+ return operands, nil +} + +func prepareSingleOperand(expression string, isJSONQuery bool) (bson.E, error) { + var filterValue interface{} + + operator, splitExpression, err := determineOperatorAndSplit(expression) + if err != nil { + return bson.E{}, err + } + + var key string + + if isJSONQuery { + key = splitExpression[0] + } else { + key = fmt.Sprintf("tags.%s", splitExpression[0]) + } + + if operator == "$lt" || operator == "$lte" || operator == "$gt" || operator == "$gte" { + value, err := strconv.Atoi(splitExpression[1]) + if err != nil { + return bson.E{}, fmt.Errorf("invalid query format. when using any one of the <=, <, >=, > "+ + "operators, the immediate value on the right side side must be a valid integer: %w", err) + } + + filterValue = bson.D{ + {Key: operator, Value: value}, + } + + operand := bson.E{ + Key: key, + Value: filterValue, + } + + return operand, nil + } + + if operator == "$exists" { + filterValue = bson.D{ + {Key: "$exists", Value: true}, + } + } else { + filterValue = convertToIntIfPossible(splitExpression[1]) + } + + operand := bson.E{ + Key: key, + Value: filterValue, + } + + return operand, nil +} + +// determineOperatorAndSplit takes the given expression and returns the operator (in the format required by MongoDB) +// along with the expression split by the operator (as defined in the Store.Query documentation). 
+func determineOperatorAndSplit(expression string) (mongoDBOperator string, expressionSplit []string, err error) { + expressionSplitByLessThanOrEqualTo := strings.Split(expression, "<=") + if len(expressionSplitByLessThanOrEqualTo) == lessThanGreaterThanExpressionLength { + return "$lte", expressionSplitByLessThanOrEqualTo, nil + } + + expressionSplitByLessThan := strings.Split(expression, "<") + if len(expressionSplitByLessThan) == lessThanGreaterThanExpressionLength { + return "$lt", expressionSplitByLessThan, nil + } + + expressionSplitByGreaterThanOrEqualTo := strings.Split(expression, ">=") + if len(expressionSplitByGreaterThanOrEqualTo) == lessThanGreaterThanExpressionLength { + return "$gte", expressionSplitByGreaterThanOrEqualTo, nil + } + + expressionSplitByGreaterThan := strings.Split(expression, ">") + if len(expressionSplitByGreaterThan) == lessThanGreaterThanExpressionLength { + return "$gt", expressionSplitByGreaterThan, nil + } + + expressionSplitByEquals := strings.Split(expression, ":") + switch len(expressionSplitByEquals) { + case equalsExpressionTagNameOnlyLength: + return "$exists", expressionSplitByEquals, nil + case equalsExpressionTagNameAndValueLength: + return "", expressionSplitByEquals, nil + default: + return "", nil, errInvalidQueryExpressionFormat + } +} + +func generateModelForBulkWriteCall(operation storage.Operation) (model mongo.WriteModel, + isInsertOneModel bool, err error) { + if operation.Value == nil { + return mongo.NewDeleteOneModel().SetFilter(bson.M{"_id": operation.Key}), false, nil + } + + data, err := generateDataWrapper(operation.Key, operation.Value, operation.Tags) + if err != nil { + return nil, false, err + } + + if operation.PutOptions != nil && operation.PutOptions.IsNewKey { + return mongo.NewInsertOneModel().SetDocument(data), true, nil + } + + return mongo.NewReplaceOneModel(). + SetFilter(bson.M{"_id": operation.Key}). + SetReplacement(data). 
+ SetUpsert(true), false, nil +} + +func generateDataWrapper(key string, value []byte, tags []storage.Tag) (dataWrapper, error) { + tagsAsMap, err := convertTagSliceToMap(tags) + if err != nil { + return dataWrapper{}, err + } + + data := dataWrapper{ + Key: key, + Tags: tagsAsMap, + } + + dataAsMap, err := convertMarshalledValueToMap(value) + + switch { + case err == nil: + data.Doc = dataAsMap + case errors.Is(err, errUnmarshalBytesIntoMap): + var unmarshalledStringValue string + + err = json.Unmarshal(value, &unmarshalledStringValue) + if err == nil { + data.Str = unmarshalledStringValue + } else { + data.Bin = value + } + default: + return dataWrapper{}, err + } + + return data, nil +} + +// PrepareDataForBSONStorage takes the given value and converts it to the type expected by the MongoDB driver for +// inserting documents. The value must be a struct with exported fields and proper json tags or a map. To use the +// MongoDB primary key (_id), you must have an _id field in either the struct or map. Alternatively, add it to the +// map returned by this function. If no _id field is set, then MongoDB will generate one for you. +func PrepareDataForBSONStorage(value interface{}) (map[string]interface{}, error) { + valueBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + return convertMarshalledValueToMap(valueBytes) +} + +func convertMarshalledValueToMap(valueBytes []byte) (map[string]interface{}, error) { + var unmarshalledValue map[string]interface{} + + jsonDecoder := json.NewDecoder(bytes.NewReader(valueBytes)) + jsonDecoder.UseNumber() + + err := jsonDecoder.Decode(&unmarshalledValue) + if err != nil { + return nil, fmt.Errorf("%w: %s", errUnmarshalBytesIntoMap, err.Error()) + } + + escapedMap, err := escapeMapForDocumentDB(unmarshalledValue) + if err != nil { + return nil, err + } + + return escapedMap, nil +} + +// escapeMapForDocumentDB recursively travels through the given map and ensures that all keys are safe for DocumentDB. 
+// All "." characters in keys are replaced with "`" characters. +// If any "`" characters are discovered in keys then an error is returned, since this would cause confusion with the +// scheme described above. +func escapeMapForDocumentDB(unescapedMap map[string]interface{}) (map[string]interface{}, error) { + escapedMap := make(map[string]interface{}) + + for unescapedKey, unescapedValue := range unescapedMap { + escapedKey, escapedValue, err := escapeKeyValuePair(unescapedKey, unescapedValue) + if err != nil { + return nil, err + } + + escapedMap[escapedKey] = escapedValue + } + + return escapedMap, nil +} + +func escapeKeyValuePair(unescapedKey string, unescapedValue interface{}) (escapedKey string, escapedValue interface{}, + err error) { + if strings.Contains(unescapedKey, "`") { + return "", nil, + fmt.Errorf(`JSON keys cannot have "`+"`"+`" characters within them. Invalid key: %s`, unescapedKey) + } + + escapedValue, err = escapeValue(unescapedValue) + if err != nil { + return "", nil, err + } + + return escapeKey(unescapedKey), escapedValue, nil +} + +func escapeKey(unescapedKey string) string { + return strings.ReplaceAll(unescapedKey, ".", "`") +} + +func escapeValue(unescapedValue interface{}) (interface{}, error) { + unescapedValueAsArray, ok := unescapedValue.([]interface{}) + if ok { + return escapeArray(unescapedValueAsArray) + } + + unescapedValueAsMap, ok := unescapedValue.(map[string]interface{}) + if ok { + escapedValue, err := escapeMapForDocumentDB(unescapedValueAsMap) + if err != nil { + return nil, err + } + + return escapedValue, nil + } + + // In this case, the value is not a nested object or array and so doesn't need escaping. 
+ return unescapedValue, nil +} + +func escapeArray(unescapedArray []interface{}) (interface{}, error) { + escapedArray := make([]interface{}, len(unescapedArray)) + + for i, unescapedValueInUnescapedArray := range unescapedArray { + escapedValue, err := escapeValue(unescapedValueInUnescapedArray) + if err != nil { + return nil, err + } + + escapedArray[i] = escapedValue + } + + return escapedArray, nil +} + +// This method is the inverse of the escapeMapForDocumentDB method. +func unescapeMapForDocumentDB(escapedMap map[string]interface{}) map[string]interface{} { + unescapedMap := make(map[string]interface{}) + + for escapedKey, escapedValue := range escapedMap { + unescapedKey, unescapedValue := unescapeKeyValuePair(escapedKey, escapedValue) + + unescapedMap[unescapedKey] = unescapedValue + } + + return unescapedMap +} + +func unescapeKeyValuePair(escapedKey string, escapedValue interface{}) (key string, unescapedValue interface{}) { + return unescapeKey(escapedKey), unescapeValue(escapedValue) +} + +func unescapeKey(escapedKey string) string { + return strings.ReplaceAll(escapedKey, "`", ".") +} + +func unescapeValue(escapedValue interface{}) interface{} { + escapedValueAsArray, ok := escapedValue.(bson.A) + if ok { + return unescapeArray(escapedValueAsArray) + } + + escapedValueAsMap, ok := escapedValue.(map[string]interface{}) + if ok { + return unescapeMapForDocumentDB(escapedValueAsMap) + } + + // In this case, the value is not a nested object or array and so doesn't need unescaping. 
+ return escapedValue +} + +func unescapeArray(escapedArray []interface{}) interface{} { + unescapedArray := make([]interface{}, len(escapedArray)) + + for i, escapedValueInEscapedArray := range escapedArray { + unescapedArray[i] = unescapeValue(escapedValueInEscapedArray) + } + + return unescapedArray +} diff --git a/component/wallet-cli/internal/storage/mongodb/store_test.go b/component/wallet-cli/internal/storage/mongodb/store_test.go new file mode 100644 index 000000000..a7d839690 --- /dev/null +++ b/component/wallet-cli/internal/storage/mongodb/store_test.go @@ -0,0 +1,2286 @@ +/* +Copyright Scoir Inc, Gen Digital Inc. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package mongodb_test + +import ( + "context" + "encoding/json" + "fmt" + "log" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + "github.com/cenkalti/backoff/v4" + "github.com/hyperledger/aries-framework-go/spi/storage" + dctest "github.com/ory/dockertest/v3" + dc "github.com/ory/dockertest/v3/docker" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/bsontype" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + + "github.com/trustbloc/vcs/component/wallet-cli/internal/storage/mongodb" +) + +const ( + mongoDBConnString = "mongodb://localhost:27017" + dockerMongoDBImage = "mongo" + dockerMongoDBTagV400 = "4.0.0" + // dockerMongoDBTagV428 = "4.2.8" + // dockerMongoDBTagV500 = "5.0.0" +) + +// This should function the same as the default logger in the mongodb package. +// This is here just to increase code coverage by allowing us to exercise the WithLogger option. +type testLogger struct { + logger *log.Logger +} + +func (d *testLogger) Infof(msg string, args ...interface{}) { + d.logger.Printf(msg, args...) 
+} + +func TestMongoDB_V4_0_0(t *testing.T) { + startContainerAndDoAllTests(t, dockerMongoDBTagV400) +} + +// func TestMongoDB_V4_2_8(t *testing.T) { +// startContainerAndDoAllTests(t, dockerMongoDBTagV428) +// } + +// func TestMongoDB_V5_0_0(t *testing.T) { +// startContainerAndDoAllTests(t, dockerMongoDBTagV500) +// } + +func TestProvider_New_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("BadConnString") + require.EqualError(t, err, `failed to create a new MongoDB client: error parsing uri: `+ + `scheme must be "mongodb" or "mongodb+srv"`) + require.Nil(t, provider) +} + +func TestProvider_SetStoreConfig_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + _, err = provider.OpenStore("StoreName") + require.NoError(t, err) + + err = provider.SetStoreConfig("StoreName", storage.StoreConfiguration{TagNames: []string{"tagName1"}}) + require.EqualError(t, err, "failed to set indexes: failed to get existing indexed tag names: "+ + "failed to get list of indexes from MongoDB: server selection error: context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: badurl:27017, Type: Unknown }, ] }") //nolint:lll +} + +func TestProvider_GetStoreConfig_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + _, err = provider.OpenStore("TestStoreName") + require.NoError(t, err) + + config, err := provider.GetStoreConfig("TestStoreName") + require.EqualError(t, err, "failed to determine if the underlying database exists for teststorename: "+ + "server selection error: context deadline exceeded, current topology: { Type: Unknown, "+ + "Servers: [{ Addr: badurl:27017, Type: Unknown }, ] }") + require.Empty(t, config) +} + +func TestProvider_Ping_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(time.Second)) + require.NoError(t, 
err) + + err = provider.Ping() + require.Contains(t, err.Error(), "server selection error: context deadline exceeded, current topology: "+ + "{ Type: Unknown, Servers: [{ Addr: badurl:27017, Type: Unknown, Last error: dial tcp: lookup badurl:") +} + +func TestStore_Put_Failure(t *testing.T) { + t.Run("Deadline exceeded (server not reachable)", func(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + err = store.Put("key", []byte("value")) + require.EqualError(t, err, "failed to run ReplaceOne command in MongoDB: server selection error: context "+ + "deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: badurl:27017, Type: Unknown }, ] }") + }) + t.Run("Invalid tags", func(t *testing.T) { + // We only test for < and > here since the : case is handled in the common unit tests (commontest.CheckAll) + t.Run("Tag name contains <", func(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://test") + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + err = store.Put("key", []byte("value"), storage.Tag{Name: "<"}) + require.EqualError(t, err, `"<" is an invalid tag name since it contains one or more of the`+ + ` following substrings: ":", "<=", "<", ">=", ">"`) + }) + t.Run("Tag value contains <", func(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://test") + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + err = store.Put("key", []byte("value"), storage.Tag{Value: "<"}) + require.EqualError(t, err, `"<" is an invalid tag value since it contains one or more of the`+ + ` following substrings: ":", "<=", "<", ">=", ">"`) + }) + t.Run("Tag name contains >", func(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://test") + require.NoError(t, err) + + store, err 
:= provider.OpenStore("StoreName") + require.NoError(t, err) + + err = store.Put("key", []byte("value"), storage.Tag{Name: ">"}) + require.EqualError(t, err, `">" is an invalid tag name since it contains one or more of the`+ + ` following substrings: ":", "<=", "<", ">=", ">"`) + }) + t.Run("Tag value contains >", func(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://test") + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + err = store.Put("key", []byte("value"), storage.Tag{Value: ">"}) + require.EqualError(t, err, `">" is an invalid tag value since it contains one or more of the`+ + ` following substrings: ":", "<=", "<", ">=", ">"`) + }) + t.Run("Tag name used more than once", func(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://test") + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + err = store.Put("key", []byte("value"), storage.Tag{Name: "SomeName"}, storage.Tag{Name: "SomeName"}) + require.EqualError(t, err, "tag name SomeName appears in more than one tag. 
"+ + "A single key-value pair cannot have multiple tags that share the same tag name") + }) + }) +} + +func TestStore_PutAsJSON_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://ExampleURL") + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + mongoDBStore, ok := store.(*mongodb.Store) + require.True(t, ok) + + err = mongoDBStore.PutAsJSON("key", "value") + require.EqualError(t, err, "failed to unmarshal bytes into map: json: cannot unmarshal string into "+ + "Go value of type map[string]interface {}") +} + +func TestStore_Get_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + value, err := store.Get("key") + require.EqualError(t, err, "failed to run FindOne command in MongoDB: server selection error: context "+ + "deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: badurl:27017, Type: Unknown }, ] }") + require.Nil(t, value) +} + +func TestStore_GetTags_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + tags, err := store.GetTags("key") + require.EqualError(t, err, "failed to run FindOne command in MongoDB: server selection error: "+ + "context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: badurl:27017, "+ + "Type: Unknown }, ] }") + require.Nil(t, tags) +} + +func TestStore_GetBulk_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + values, err := store.GetBulk("key1", "key2") + require.EqualError(t, err, "failed to run Find command in MongoDB: server selection 
error: context "+ + "deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: badurl:27017, Type: Unknown }, ] }") + require.Nil(t, values) +} + +func TestStore_Delete_Failure(t *testing.T) { + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + store, err := provider.OpenStore("StoreName") + require.NoError(t, err) + + err = store.Delete("key1") + + require.Contains(t, err.Error(), "failed to run DeleteOne command in MongoDB: server selection error: "+ + "context deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: badurl:27017, Type: Unknown") +} + +func TestStore_Batch_TimeoutFailure(t *testing.T) { + storeName := randomStoreName() + + provider, err := mongodb.NewProvider("mongodb://BadURL", mongodb.WithTimeout(1)) + require.NoError(t, err) + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + err = store.Batch([]storage.Operation{{Key: "key"}}) + require.EqualError(t, err, "failed to run BulkWrite command in MongoDB: server selection error: context "+ + "deadline exceeded, current topology: { Type: Unknown, Servers: [{ Addr: badurl:27017, Type: Unknown }, ] }") +} + +func startContainerAndDoAllTests(t *testing.T, dockerMongoDBTag string) { + t.Helper() + + pool, mongoDBResource := startMongoDBContainer(t, dockerMongoDBTag) + + defer func() { + require.NoError(t, pool.Purge(mongoDBResource), "failed to purge MongoDB resource") + }() + + doAllTests(t, mongoDBConnString) +} + +func doAllTests(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString, mongodb.WithLogger(&testLogger{ + logger: log.New(os.Stdout, "MongoDB-Provider ", log.Ldate|log.Ltime|log.LUTC), + })) + require.NoError(t, err) + + CheckAll(t, provider) + testGetStoreConfigUnderlyingDatabaseCheck(t, connString) + testMultipleProvidersSettingSameStoreConfigurationAtTheSameTime(t, connString) + 
testMultipleProvidersStoringSameDataAtTheSameTime(t, connString) + testMultipleProvidersStoringSameBulkDataAtTheSameTime(t, connString) + testCloseProviderTwice(t, connString) + testQueryWithMultipleTags(t, connString) + testQueryWithLessThanGreaterThanOperators(t, connString) + testStoreJSONNeedingEscaping(t, connString) + testBatchIsNewKeyError(t, connString) + testPing(t, connString) + testGetAsRawMap(t, connString) + testGetBulkAsRawMap(t, connString) + testCustomIndexAndQuery(t, connString) + testDocumentReplacementAndMarshalling(t, connString) + testBulkWrite(t, connString) +} + +func testGetStoreConfigUnderlyingDatabaseCheck(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + defer func() { + require.NoError(t, provider.Close()) + }() + + storeName := randomStoreName() + + // The MongoDB database shouldn't exist yet. + config, err := provider.GetStoreConfig(storeName) + require.Equal(t, true, errors.Is(storage.ErrStoreNotFound, err), + "unexpected error or no error") + require.Empty(t, config) + + _, err = provider.OpenStore(storeName) + require.NoError(t, err) + + // Even though MongoDB defers creation of the underlying database until there is data put in or indexes are set, + // we have code to ensure this method doesn't return an ErrStoreNotFound. + config, err = provider.GetStoreConfig(storeName) + require.NoError(t, err) + require.Empty(t, config) + + // This will cause MongoDB to create the actual database. + err = provider.SetStoreConfig(storeName, storage.StoreConfiguration{TagNames: []string{"TagName1"}}) + require.NoError(t, err) + + // Now the underlying database should be found. + config, err = provider.GetStoreConfig(storeName) + require.NoError(t, err) + require.Equal(t, "TagName1", config.TagNames[0]) + + err = provider.Close() + require.NoError(t, err) + + // Create a new Provider object. 
+ provider2, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + defer func() { + require.NoError(t, provider2.Close()) + }() + + // This method tells you how many Store objects are open in this Provider. + // Since it's a new Provider, there shouldn't be any. + openStores := provider2.GetOpenStores() + require.Len(t, openStores, 0) + + // This will succeed since GetStoreConfig checks the underlying databases instead of the + // in-memory Store objects (which will be empty). + // If we hadn't called SetStoreConfig before, then this would return an ErrStoreNotFound. + config, err = provider2.GetStoreConfig(storeName) + require.NoError(t, err) + require.Equal(t, "TagName1", config.TagNames[0]) + + // The call above should not have created a new Store object. + openStores = provider2.GetOpenStores() + require.Len(t, openStores, 0) + + // As mentioned above, MongoDB defers creating databases until there is data put in or indexes are set. + // The code above triggered database creation by creating indexes. Below we will do the same type of test, but this + // time we create the database by storing data. + storeName2 := randomStoreName() + + store, err := provider2.OpenStore(storeName2) + require.NoError(t, err) + + // Even though MongoDB defers creation of the underlying database until there is data put in or indexes are set, + // we have code to ensure this method doesn't return an ErrStoreNotFound. + config, err = provider2.GetStoreConfig(storeName2) + require.NoError(t, err) + require.Empty(t, config) + + // This will cause MongoDB to create the actual database. + err = store.Put("key", []byte("value")) + require.NoError(t, err) + + // Create a new Provider object. + provider3, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + defer func() { + require.NoError(t, provider3.Close()) + }() + + // This will succeed since GetStoreConfig checks the underlying databases instead of the + // in-memory Store objects (which will be empty). 
+ // If we hadn't called Put before, then this would return an ErrStoreNotFound. + config, err = provider3.GetStoreConfig(storeName2) + require.NoError(t, err) + require.Empty(t, config.TagNames) +} + +func testMultipleProvidersSettingSameStoreConfigurationAtTheSameTime(t *testing.T, connString string) { + t.Helper() + + const numberOfProviders = 100 + + storeName := randomStoreName() + + providers := make([]*mongodb.Provider, numberOfProviders) + + openStores := make([]storage.Store, numberOfProviders) + + for i := 0; i < numberOfProviders; i++ { + provider, err := mongodb.NewProvider(connString, mongodb.WithTimeout(time.Second*10), mongodb.WithMaxRetries(10), mongodb.WithTimeBetweenRetries(time.Second)) + require.NoError(t, err) + + // If you see a warning in your IDE about having a defer statement in a loop, it can be ignored in this case. + // The goal is to close all the stores as soon as there's a failure anywhere in this test in order to free + // up resources for other tests, which may still pass. We don't want them to close at the end of this loop, + // so there's no issue having this here. 
+ defer func() { + require.NoError(t, provider.Close()) + }() + + providers[i] = provider + + openStore, err := providers[i].OpenStore(storeName) + require.NoError(t, err) + + openStores[i] = openStore + } + + var waitGroup sync.WaitGroup + + for i := 0; i < numberOfProviders; i++ { + i := i + + waitGroup.Add(1) + + setStoreConfig := func() { + defer waitGroup.Done() + + errSetStoreConfig := providers[i].SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{ + "TagName1", "TagName2", "TagName3", "TagName4", + "TagName5", "TagName6", "TagName7", "TagName8", + "TagName9", "TagName10", "TagName11", "TagName12", + "TagName13", "TagName14", "TagName15", "TagName16", + "TagName17", "TagName18", "TagName19", "TagName20", + "TagName21", "TagName22", "TagName23", "TagName24", + "TagName25", "TagName26", "TagName27", "TagName28", + "TagName29", "TagName30", "TagName31", "TagName32", + }}) + require.NoError(t, errSetStoreConfig) + + // Close the Store as soon as possible in order to free up resources for other threads. 
+ require.NoError(t, openStores[i].Close()) + } + go setStoreConfig() + } + + waitGroup.Wait() + + storeConfig, err := providers[0].GetStoreConfig(storeName) + require.NoError(t, err) + + require.Len(t, storeConfig.TagNames, 32) + + for i := 0; i < len(storeConfig.TagNames); i++ { + require.Equal(t, fmt.Sprintf("TagName%d", i+1), storeConfig.TagNames[i]) + } +} + +func testMultipleProvidersStoringSameDataAtTheSameTime(t *testing.T, connString string) { + t.Helper() + + const numberOfProviders = 100 + + storeName := randomStoreName() + + providers := make([]*mongodb.Provider, numberOfProviders) + + openStores := make([]storage.Store, numberOfProviders) + + for i := 0; i < numberOfProviders; i++ { + provider, err := mongodb.NewProvider(connString, mongodb.WithTimeout(time.Second*10), mongodb.WithMaxRetries(10), mongodb.WithTimeBetweenRetries(time.Second)) + require.NoError(t, err) + + // If you see a warning in your IDE about having a defer statement in a loop, it can be ignored in this case. + // The goal is to close all the stores as soon as there's a failure anywhere in this test in order to free + // up resources for other tests, which may still pass. We don't want them to close at the end of this loop, + // so there's no issue having this here. 
+ defer func() { + require.NoError(t, provider.Close()) + }() + + providers[i] = provider + + openStore, err := providers[i].OpenStore(storeName) + require.NoError(t, err) + + openStores[i] = openStore + } + + type sampleStruct struct { + Entry1 string `json:"entry1"` + Entry2 string `json:"entry2"` + Entry3 string `json:"entry3"` + } + + sampleData := sampleStruct{ + Entry1: "value1", + Entry2: "value2", + Entry3: "value3", + } + + sampleDataBytes, err := json.Marshal(sampleData) + require.NoError(t, err) + + var waitGroup sync.WaitGroup + + for i := 0; i < numberOfProviders; i++ { + i := i + + waitGroup.Add(1) + + setStoreConfig := func() { + defer waitGroup.Done() + + errPut := openStores[i].Put("key", sampleDataBytes) + require.NoError(t, errPut) + } + go setStoreConfig() + } + + waitGroup.Wait() + + value, err := openStores[0].Get("key") + require.NoError(t, err) + + var retrievedData sampleStruct + + err = json.Unmarshal(value, &retrievedData) + require.NoError(t, err) + + require.Equal(t, sampleData.Entry1, retrievedData.Entry1) + require.Equal(t, sampleData.Entry2, retrievedData.Entry2) + require.Equal(t, sampleData.Entry3, retrievedData.Entry3) +} + +func testMultipleProvidersStoringSameBulkDataAtTheSameTime(t *testing.T, connString string) { + t.Helper() + + const numberOfProviders = 100 + + storeName := randomStoreName() + + providers := make([]*mongodb.Provider, numberOfProviders) + + openStores := make([]storage.Store, numberOfProviders) + + for i := 0; i < numberOfProviders; i++ { + provider, err := mongodb.NewProvider(connString, mongodb.WithTimeout(time.Second*5), mongodb.WithMaxRetries(10), mongodb.WithTimeBetweenRetries(time.Second)) + require.NoError(t, err) + + // If you see a warning in your IDE about having a defer statement in a loop, it can be ignored in this case. + // The goal is to close all the stores as soon as there's a failure anywhere in this test in order to free + // up resources for other tests, which may still pass. 
We don't want them to close at the end of this loop, + // so there's no issue having this here. + defer func() { + require.NoError(t, provider.Close()) + }() + + providers[i] = provider + + openStore, err := providers[i].OpenStore(storeName) + require.NoError(t, err) + + openStores[i] = openStore + } + + type sampleStruct struct { + Entry1 string `json:"entry1"` + Entry2 string `json:"entry2"` + Entry3 string `json:"entry3"` + } + + sampleData1 := sampleStruct{ + Entry1: "value1", + Entry2: "value2", + Entry3: "value3", + } + + sampleData1Bytes, err := json.Marshal(sampleData1) + require.NoError(t, err) + + sampleData2 := sampleStruct{ + Entry1: "value4", + Entry2: "value5", + Entry3: "value6", + } + + sampleData2Bytes, err := json.Marshal(sampleData2) + require.NoError(t, err) + + sampleData3 := sampleStruct{ + Entry1: "value7", + Entry2: "value8", + Entry3: "value9", + } + + sampleData3Bytes, err := json.Marshal(sampleData3) + require.NoError(t, err) + + operations := []storage.Operation{ + {Key: "key1", Value: sampleData1Bytes}, + {Key: "key2", Value: sampleData2Bytes}, + {Key: "key3", Value: sampleData3Bytes}, + } + + var waitGroup sync.WaitGroup + + for i := 0; i < numberOfProviders; i++ { + i := i + + waitGroup.Add(1) + + setStoreConfig := func() { + defer waitGroup.Done() + + errBatch := openStores[i].Batch(operations) + require.NoError(t, errBatch) + + // Close the Store as soon as possible in order to free up resources for other threads. 
+ require.NoError(t, openStores[i].Close()) + } + go setStoreConfig() + } + + waitGroup.Wait() + + values, err := openStores[0].GetBulk("key1", "key2", "key3") + require.NoError(t, err) + + require.Len(t, values, 3) + + var retrievedData1 sampleStruct + + err = json.Unmarshal(values[0], &retrievedData1) + require.NoError(t, err) + + require.Equal(t, sampleData1.Entry1, retrievedData1.Entry1) + require.Equal(t, sampleData1.Entry2, retrievedData1.Entry2) + require.Equal(t, sampleData1.Entry3, retrievedData1.Entry3) + + var retrievedData2 sampleStruct + + err = json.Unmarshal(values[1], &retrievedData2) + require.NoError(t, err) + + require.Equal(t, sampleData2.Entry1, retrievedData2.Entry1) + require.Equal(t, sampleData2.Entry2, retrievedData2.Entry2) + require.Equal(t, sampleData2.Entry3, retrievedData2.Entry3) + + var retrievedData3 sampleStruct + + err = json.Unmarshal(values[2], &retrievedData3) + require.NoError(t, err) + + require.Equal(t, sampleData3.Entry1, retrievedData3.Entry1) + require.Equal(t, sampleData3.Entry2, retrievedData3.Entry2) + require.Equal(t, sampleData3.Entry3, retrievedData3.Entry3) +} + +func testCloseProviderTwice(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + _, err = provider.OpenStore("TestStore1") + require.NoError(t, err) + + _, err = provider.OpenStore("TestStore2") + require.NoError(t, err) + + require.NoError(t, provider.Close()) + require.NoError(t, provider.Close()) // Should succeed, even if called repeatedly. 
+} + +func testQueryWithMultipleTags(t *testing.T, connString string) { //nolint: gocyclo // test file + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + defer func() { + require.NoError(t, provider.Close()) + }() + + keysToPut, valuesToPut, tagsToPut := getTestData() + + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{ + tagsToPut[0][0].Name, + tagsToPut[0][1].Name, + tagsToPut[0][2].Name, + tagsToPut[0][3].Name, + tagsToPut[0][4].Name, + }}) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + t.Run("Both pairs are tag names + values - 3 values found", func(t *testing.T) { + queryExpressionsToTest := []string{ + "Breed:GoldenRetriever&&NumLegs:4&&EarType:Floppy", + "NumLegs:4&&EarType:Floppy&&Breed:GoldenRetriever", // Should be equivalent to the above expression + } + + expectedKeys := []string{keysToPut[0], keysToPut[3], keysToPut[4]} + expectedValues := [][]byte{valuesToPut[0], valuesToPut[3], valuesToPut[4]} + expectedTags := [][]storage.Tag{tagsToPut[0], tagsToPut[3], tagsToPut[4]} + expectedTotalItemsCount := 3 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryExpressionToTest := range queryExpressionsToTest { + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpressionToTest, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + } + }) + t.Run("Both pairs are tag names + values - 2 values found", func(t *testing.T) { + queryExpressionsToTest := []string{ + "Breed:GoldenRetriever&&Personality:Calm", + 
"Personality:Calm&&Breed:GoldenRetriever", // Should be equivalent to the above expression + } + + expectedKeys := []string{keysToPut[3], keysToPut[4]} + expectedValues := [][]byte{valuesToPut[3], valuesToPut[4]} + expectedTags := [][]storage.Tag{tagsToPut[3], tagsToPut[4]} + expectedTotalItemsCount := 2 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryExpressionToTest := range queryExpressionsToTest { + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpressionToTest, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + } + }) + t.Run("Both pairs are tag names + values - 1 value found", func(t *testing.T) { + queryExpressionsToTest := []string{ + "Personality:Shy&&EarType:Pointy", + "EarType:Pointy&&Personality:Shy", // Should be equivalent to the above expression + } + + expectedKeys := []string{keysToPut[1]} + expectedValues := [][]byte{valuesToPut[1]} + expectedTags := [][]storage.Tag{tagsToPut[1]} + expectedTotalItemsCount := 1 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryExpressionToTest := range queryExpressionsToTest { + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpressionToTest, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + } + }) + t.Run("Both pairs are tag names + values - 0 values found", func(t *testing.T) { + queryExpressionsToTest := []string{ + "Personality:Crazy&&EarType:Pointy", + "EarType:Pointy&&Personality:Crazy", // Should be equivalent to the above expression + } + + expectedTotalItemsCount := 0 + + 
queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryExpressionToTest := range queryExpressionsToTest { + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpressionToTest, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, nil, nil, nil, expectedTotalItemsCount, false) + } + } + }) + t.Run("First pair is a tag name + value, second is a tag name only - 1 value found", func(t *testing.T) { + queryExpressionsToTest := []string{ + "EarType:Pointy&&Nickname", + "Nickname&&EarType:Pointy", // Should be equivalent to the above expression + } + + expectedKeys := []string{keysToPut[2]} + expectedValues := [][]byte{valuesToPut[2]} + expectedTags := [][]storage.Tag{tagsToPut[2]} + expectedTotalItemsCount := 1 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryExpressionToTest := range queryExpressionsToTest { + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpressionToTest, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + } + }) + t.Run("First pair is a tag name + value, second is a tag name only - 0 values found", func(t *testing.T) { + queryExpressionsToTest := []string{ + "EarType:Pointy&&CoatType", + "CoatType&&EarType:Pointy", // Should be equivalent to the above expression + } + + expectedTotalItemsCount := 0 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryExpressionToTest := range queryExpressionsToTest { + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpressionToTest, 
queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, nil, nil, nil, expectedTotalItemsCount, false) + } + } + }) +} + +func testQueryWithLessThanGreaterThanOperators(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + defer func() { + require.NoError(t, provider.Close()) + }() + + keysToPut, valuesToPut, tagsToPut := getTestData() + + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{ + tagsToPut[0][0].Name, + tagsToPut[0][1].Name, + tagsToPut[0][2].Name, + tagsToPut[0][3].Name, + tagsToPut[0][4].Name, + tagsToPut[0][5].Name, + }}) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + t.Run("Less than or equal to", func(t *testing.T) { + queryExpression := "Age<=2" + + expectedKeys := []string{keysToPut[0], keysToPut[1], keysToPut[2]} + expectedValues := [][]byte{valuesToPut[0], valuesToPut[1], valuesToPut[2]} + expectedTags := [][]storage.Tag{tagsToPut[0], tagsToPut[1], tagsToPut[2]} + expectedTotalItemsCount := 3 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpression, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + }) + t.Run("Less than", func(t *testing.T) { + queryExpression := "Age<2" + + expectedKeys := []string{keysToPut[1], keysToPut[2]} + expectedValues := [][]byte{valuesToPut[1], valuesToPut[2]} + expectedTags := [][]storage.Tag{tagsToPut[1], tagsToPut[2]} + expectedTotalItemsCount := 2 + + 
queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpression, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + }) + t.Run("Greater than or equal to", func(t *testing.T) { + queryExpression := "Age>=2" + + expectedKeys := []string{keysToPut[0], keysToPut[3]} + expectedValues := [][]byte{valuesToPut[0], valuesToPut[3]} + expectedTags := [][]storage.Tag{tagsToPut[0], tagsToPut[3]} + expectedTotalItemsCount := 2 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpression, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + }) + t.Run("Greater than", func(t *testing.T) { + queryExpression := "Age>2" + + expectedKeys := []string{keysToPut[3]} + expectedValues := [][]byte{valuesToPut[3]} + expectedTags := [][]storage.Tag{tagsToPut[3]} + expectedTotalItemsCount := 1 + + queryOptionsToTest := []storage.QueryOption{ + nil, + storage.WithPageSize(2), + storage.WithPageSize(1), + storage.WithPageSize(100), + } + + for _, queryOptionToTest := range queryOptionsToTest { + iterator, err := store.Query(queryExpression, queryOptionToTest) + require.NoError(t, err) + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, false) + } + }) + t.Run("Tag value is not a valid integer", func(t *testing.T) { + iterator, err := store.Query("TagName>ThisIsNotAnInteger") + require.EqualError(t, err, "invalid query format. 
when using any one of the <=, <, >=, > operators, "+ + "the immediate value on the right side side must be a valid integer: strconv.Atoi: parsing "+ + `"ThisIsNotAnInteger": invalid syntax`) + require.Nil(t, iterator) + }) +} + +func testStoreJSONNeedingEscaping(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + defer func() { + require.NoError(t, provider.Close()) + }() + + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + t.Run("Success", func(t *testing.T) { + t.Run("One simple key-value pair", func(t *testing.T) { + type simpleType struct { + OneDotHere string `json:"oneDotHere.,omitempty"` + } + + testValue := simpleType{OneDotHere: "SomeValue"} + + testValueBytes, err := json.Marshal(testValue) + require.NoError(t, err) + + testKey := "TestKey1" + + err = store.Put(testKey, testValueBytes) + require.NoError(t, err) + + testValueBytesRetrieved, err := store.Get(testKey) + require.NoError(t, err) + + var testValueRetrieved simpleType + + err = json.Unmarshal(testValueBytesRetrieved, &testValueRetrieved) + require.NoError(t, err) + + require.Equal(t, testValue.OneDotHere, testValueRetrieved.OneDotHere) + }) + t.Run("Two key-value pairs, no escaping needed", func(t *testing.T) { + type testType struct { + NoDotHere string `json:"noDotHere,omitempty"` + DotInValue string `json:"dotInValue,omitempty"` + } + + testValue := testType{ + NoDotHere: "SomeValue", + DotInValue: "DotHereButItDoesn'tNeedEscaping.", + } + + testValueBytes, err := json.Marshal(testValue) + require.NoError(t, err) + + testKey := "TestKey2" + + err = store.Put(testKey, testValueBytes) + require.NoError(t, err) + + testValueRetrievedBytes, err := store.Get(testKey) + require.NoError(t, err) + + var testValueRetrieved testType + + err = json.Unmarshal(testValueRetrievedBytes, &testValueRetrieved) + require.NoError(t, err) + + require.True(t, reflect.DeepEqual(testValueRetrieved, 
testValue), + "Value retrieved from storage not the same as what was put in originally") + }) + t.Run("Two key-value pairs, only one needs escaping", func(t *testing.T) { + type testType struct { + OneDotHere string `json:"oneDotHere.,omitempty"` + DotInValue string `json:"dotInValue,omitempty"` + } + + testValue := testType{ + OneDotHere: "SomeValue", + DotInValue: "DotHereButItDoesn'tNeedEscaping.", + } + + testValueBytes, err := json.Marshal(testValue) + require.NoError(t, err) + + testKey := "TestKey3" + + err = store.Put(testKey, testValueBytes) + require.NoError(t, err) + + testValueRetrievedBytes, err := store.Get(testKey) + require.NoError(t, err) + + var testValueRetrieved testType + + err = json.Unmarshal(testValueRetrievedBytes, &testValueRetrieved) + require.NoError(t, err) + + require.True(t, reflect.DeepEqual(testValueRetrieved, testValue), + "Value retrieved from storage not the same as what was put in originally") + }) + t.Run("Nested object", func(t *testing.T) { + type testTypeInner struct { + SeveralDotsHere string `json:".several.Dots.Here.,omitempty"` + } + + type testType struct { + OneDotHere string `json:"oneDotHere.,omitempty"` + NoDotHere string `json:"noDotHere,omitempty"` + NestedObjectWithDotInName testTypeInner `json:"nestedObject.,omitempty"` + } + + testValue := testType{ + OneDotHere: "SomeValue", + NoDotHere: "AlsoSomeValue", + NestedObjectWithDotInName: testTypeInner{SeveralDotsHere: "SomeNestedValue"}, + } + + testValueBytes, err := json.Marshal(testValue) + require.NoError(t, err) + + testKey := "TestKey4" + + err = store.Put(testKey, testValueBytes) + require.NoError(t, err) + + testValueRetrievedBytes, err := store.Get(testKey) + require.NoError(t, err) + + var testValueRetrieved testType + + err = json.Unmarshal(testValueRetrievedBytes, &testValueRetrieved) + require.NoError(t, err) + + require.True(t, reflect.DeepEqual(testValueRetrieved, testValue), + "Value retrieved from storage not the same as what was put in 
originally") + }) + t.Run("Array", func(t *testing.T) { + type testTypeInner struct { + SeveralDotsHere string `json:".several.Dots.Here.,omitempty"` + } + + type testType struct { + OneDotHere string `json:"oneDotHere.,omitempty"` + NoDotHere string `json:"noDotHere,omitempty"` + ArrayOfNestedObjects []testTypeInner `json:"nestedObject.,omitempty"` + } + + testValue := testType{ + OneDotHere: "SomeValue", + NoDotHere: "AlsoSomeValue", + ArrayOfNestedObjects: []testTypeInner{ + {SeveralDotsHere: "SomeNestedValue1"}, + {SeveralDotsHere: "SomeNestedValue2"}, + }, + } + + testValueBytes, err := json.Marshal(testValue) + require.NoError(t, err) + + testKey := "TestKey5" + + err = store.Put(testKey, testValueBytes) + require.NoError(t, err) + + testValueRetrievedBytes, err := store.Get(testKey) + require.NoError(t, err) + + var testValueRetrieved testType + + err = json.Unmarshal(testValueRetrievedBytes, &testValueRetrieved) + require.NoError(t, err) + + require.True(t, reflect.DeepEqual(testValueRetrieved, testValue), + "Value retrieved from storage not the same as what was put in originally") + }) + t.Run("Big, complex object with many different types and lots of nesting and arrays", func(t *testing.T) { + type leaf struct { + NoDotHere string `json:"noDotHere,omitempty"` + OneDotHere bool `json:"oneDotHere.,omitempty"` + SeveralDotsHere float64 `json:".several.Dots.Here.,omitempty"` + } + + type smallerBranch struct { + OneDotHere string `json:"oneDotHere.,omitempty"` + SeveralDotsHere float32 `json:".several.Dots.Here.,omitempty"` + Leaf leaf `json:"leaf...,omitempty"` + } + + type biggerBranch struct { + SeveralDotsHere int `json:".several.Dots.Here.,omitempty"` + NoDotHere string `json:"noDotHere,omitempty"` + SmallerBranch smallerBranch `json:"smallerBranch,omitempty"` + SmallerBranches []smallerBranch `json:"smaller.Branches,omitempty"` + } + + type treeRoot struct { + OneDotHere string `json:"oneDotHere.,omitempty"` + NoDotHere string 
`json:"noDotHere,omitempty"` + AlsoNoDotHere int `json:"alsoNoDotHere,omitempty"` + DeeplyNestedObject1 biggerBranch `json:"deeply.NestedObject1,omitempty"` + ArrayOfNestedObjects []leaf `json:"arrayOfNestedObjects.,omitempty"` + DeeplyNestedObject2 biggerBranch `json:"deeplyNestedObject2,omitempty"` + } + + testValue := treeRoot{ + OneDotHere: "SomeValue1", + NoDotHere: "SomeValue2", + AlsoNoDotHere: 3, + DeeplyNestedObject1: biggerBranch{ + SeveralDotsHere: -4, + NoDotHere: "SomeValue3", + SmallerBranch: smallerBranch{ + OneDotHere: "SomeValue4", + SeveralDotsHere: 0.65, + Leaf: leaf{ + NoDotHere: "SomeValue5", + OneDotHere: true, + SeveralDotsHere: -17.6789323, + }, + }, + SmallerBranches: []smallerBranch{ + { + OneDotHere: "SomeValue5", + SeveralDotsHere: 100.654, + Leaf: leaf{ + NoDotHere: "SomeValue6", + OneDotHere: false, + SeveralDotsHere: 101, + }, + }, + { + OneDotHere: "SomeValue7", + SeveralDotsHere: 1, + Leaf: leaf{ + NoDotHere: "SomeValue8", + OneDotHere: false, + SeveralDotsHere: 1994, + }, + }, + }, + }, + ArrayOfNestedObjects: []leaf{ + { + NoDotHere: "SomeValue9", + OneDotHere: true, + SeveralDotsHere: 3.14159, + }, + { + NoDotHere: "Some.Value10", + OneDotHere: false, + SeveralDotsHere: 589, + }, + }, + } + + testValueBytes, err := json.Marshal(testValue) + require.NoError(t, err) + + testKey := "TestKey6" + + err = store.Put(testKey, testValueBytes) + require.NoError(t, err) + + testValueRetrievedBytes, err := store.Get(testKey) + require.NoError(t, err) + + var testValueRetrieved treeRoot + + err = json.Unmarshal(testValueRetrievedBytes, &testValueRetrieved) + require.NoError(t, err) + + require.True(t, reflect.DeepEqual(testValueRetrieved, testValue), + "Value retrieved from storage not the same as what was put in originally") + }) + }) + t.Run("Attempt to store JSON with a key containing the backtick (`) character", func(t *testing.T) { + testValueUsingBacktickInRootLevel := `{"keyWithBacktick` + "`" + `":"Value"}` + + t.Run("Put", func(t 
*testing.T) { + t.Run("Invalid character in root level", func(t *testing.T) { + err := store.Put("TestKey4", []byte(testValueUsingBacktickInRootLevel)) + require.EqualError(t, err, "JSON keys cannot have \"`\" characters within them. "+ + "Invalid key: keyWithBacktick`") + }) + t.Run("Invalid character in nested object", func(t *testing.T) { + testValueUsingBacktickInNestedLevel := `{"nestedData":{"keyWithBacktick` + "`" + `":"Value"}}` + + err := store.Put("TestKey4", []byte(testValueUsingBacktickInNestedLevel)) + require.EqualError(t, err, "JSON keys cannot have \"`\" characters within them. "+ + "Invalid key: keyWithBacktick`") + }) + t.Run("Invalid character in object in array", func(t *testing.T) { + testValueUsingBacktickInNestedLevel := `{"arrayData":[{"keyWithBacktick` + "`" + `":"Value"}]}` + + err := store.Put("TestKey4", []byte(testValueUsingBacktickInNestedLevel)) + require.EqualError(t, err, "JSON keys cannot have \"`\" characters within them. "+ + "Invalid key: keyWithBacktick`") + }) + }) + t.Run("Batch - invalid character in root level", func(t *testing.T) { + operations := []storage.Operation{{Key: "TestKey4", Value: []byte(testValueUsingBacktickInRootLevel)}} + err := store.Batch(operations) + require.EqualError(t, err, "JSON keys cannot have \"`\" characters within them. "+ + "Invalid key: keyWithBacktick`") + }) + }) +} + +func testBatchIsNewKeyError(t *testing.T, connString string) { + t.Helper() + + storeName := randomStoreName() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + err = store.Put("SomeKey", []byte("SomeValue")) + require.NoError(t, err) + + operations := []storage.Operation{ + {Key: "SomeKey", Value: []byte("SomeUpdatedValue"), PutOptions: &storage.PutOptions{IsNewKey: true}}, + } + + err = store.Batch(operations) + + expectedErrMsgPrefix := "failed to perform batch operations after 4 attempts: duplicate key. 
Either an " + + "InsertOne model is being used for a key that already exists in the database, or, if " + + "using MongoDB 4.0.0, then this may be a transient error due to another call storing data under the same " + + "key at the same time. Underlying error message: bulk write exception: write errors: [E11000 duplicate key " + + "error" + + gotExpectedError := strings.HasPrefix(err.Error(), expectedErrMsgPrefix) + + require.True(t, gotExpectedError, fmt.Sprintf("received unexpected error. Expected the error message to "+ + `start with "%s", but the error was "%s"`, expectedErrMsgPrefix, err.Error())) +} + +func testPing(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + err = provider.Ping() + require.NoError(t, err) +} + +func testGetAsRawMap(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + mongoDBStore, ok := store.(*mongodb.Store) + require.True(t, ok) + + testData := map[string]interface{}{ + "field1": "value1", + "field2": int64(2), + "field3": true, + } + + err = mongoDBStore.PutAsJSON("TestKey", testData) + require.NoError(t, err) + + retrievedTestData, err := mongoDBStore.GetAsRawMap("TestKey") + require.NoError(t, err) + + // The retrieved test data should be the same as the input test data, except that there's an _id field now. 
+ + testData["_id"] = "TestKey" + + require.True(t, reflect.DeepEqual(testData, retrievedTestData), + "unexpected retrieved test data") +} + +func testGetBulkAsRawMap(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + var ok bool + + mongoDBStore, ok := store.(*mongodb.Store) + require.True(t, ok) + + _, err = mongoDBStore.GetBulkAsRawMap("TestKey1", "") + require.EqualError(t, err, "key cannot be empty") + + testData1 := map[string]interface{}{ + "field1": "value1", + "field2": int64(2), + "field3": true, + } + + testData2 := map[string]interface{}{ + "field1": "value1", + "field2": int64(2), + "field3": true, + } + + require.NoError(t, mongoDBStore.PutAsJSON("TestKey1", testData1)) + require.NoError(t, mongoDBStore.PutAsJSON("TestKey2", testData2)) + + retrievedTestData, err := mongoDBStore.GetBulkAsRawMap("TestKey1", "TestKey2") + require.NoError(t, err) + require.Len(t, retrievedTestData, 2) + + // The retrieved test data should be the same as the input test data, except that there's an _id field now. 
+ testData1["_id"] = "TestKey1" + testData2["_id"] = "TestKey2" + + require.True(t, reflect.DeepEqual(testData1, retrievedTestData[0]), "unexpected retrieved test data") + require.True(t, reflect.DeepEqual(testData2, retrievedTestData[1]), "unexpected retrieved test data") +} + +func testCustomIndexAndQuery(t *testing.T, connString string) { + t.Helper() + t.Run("Using individual PutAsJSON calls", func(t *testing.T) { + t.Run("Without query options", func(t *testing.T) { + doCustomIndexAndQueryTest(t, connString, false) + }) + t.Run("Using query options", func(t *testing.T) { + doCustomIndexAndQueryTest(t, connString, false, options.Find().SetBatchSize(2)) + }) + }) + t.Run("Using BulkWrite call", func(t *testing.T) { + t.Run("Without query options", func(t *testing.T) { + doCustomIndexAndQueryTest(t, connString, true) + }) + t.Run("Using query options", func(t *testing.T) { + doCustomIndexAndQueryTest(t, connString, true, options.Find().SetBatchSize(2)) + }) + }) + t.Run("Store not found", func(t *testing.T) { + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + err = provider.CreateCustomIndexes("NonExistentStore", mongo.IndexModel{}) + require.Equal(t, err, storage.ErrStoreNotFound) + }) + t.Run("Fail to create indexes", func(t *testing.T) { + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + storeName := randomStoreName() + + _, err = provider.OpenStore(storeName) + require.NoError(t, err) + + err = provider.CreateCustomIndexes(storeName, mongo.IndexModel{}) + require.EqualError(t, err, "failed to create indexes in MongoDB collection: failed to create "+ + "indexes in MongoDB collection: index model keys cannot be nil") + }) +} + +func doCustomIndexAndQueryTest(t *testing.T, connString string, useBatch bool, opts ...*options.FindOptions) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + storeName := randomStoreName() + + store, err := 
provider.OpenStore(storeName) + require.NoError(t, err) + + var ok bool + + mongoDBStore, ok := store.(*mongodb.Store) + require.True(t, ok) + + valuesToStore := generateJSONTestData() + + storeDataWithoutWrapping(t, mongoDBStore, valuesToStore, useBatch) + + createCustomIndexForCustomQueryTests(t, provider, storeName) + + t.Run("Querying for one attribute", func(t *testing.T) { + filter := bson.D{ + { + Key: "indexed.attributes.name", + Value: "Name1", + }, + } + + iterator, err := mongoDBStore.QueryCustom(filter, opts...) + require.NoError(t, err) + require.NotEmpty(t, iterator) + + expectedKeys := []string{"Document1", "Document2"} + + verifyExpectedIterator2(t, iterator, expectedKeys, valuesToStore, nil, 2, true) + }) + t.Run("Querying for two attributes (AND operator)", func(t *testing.T) { + filter := bson.D{ + { + Key: "indexed.attributes.name", + Value: "Name1", + }, + { + Key: "indexed.attributes.value", + Value: "Value1", + }, + } + + iterator, err := mongoDBStore.QueryCustom(filter, opts...) + require.NoError(t, err) + require.NotEmpty(t, iterator) + + expectedKeys := []string{"Document1"} + expectedValues := [][]byte{valuesToStore[0]} + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, nil, 1, true) + }) + t.Run("Querying for two attributes (OR operator)", func(t *testing.T) { + filter := bson.D{ + { + Key: "$or", + Value: bson.A{ + bson.D{{Key: "indexed.attributes.name", Value: "Name3"}}, + bson.D{{Key: "indexed.attributes.name", Value: "Name4"}}, + }, + }, + } + iterator, err := mongoDBStore.QueryCustom(filter, opts...) 
+ require.NoError(t, err) + require.NotEmpty(t, iterator) + + expectedKeys := []string{"Document1", "Document3"} + expectedValues := [][]byte{valuesToStore[0], valuesToStore[2]} + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, nil, 2, true) + }) + t.Run("Querying for multiple attributes (mix of AND and OR operators)", func(t *testing.T) { + filter := bson.D{ + { + Key: "$or", + Value: bson.A{ + bson.D{ + {Key: "indexed.attributes.name", Value: "Name1"}, + {Key: "indexed.attributes.value", Value: "Value2"}, + }, + bson.D{{Key: "indexed.attributes.name", Value: "Name4"}}, + }, + }, + } + iterator, err := mongoDBStore.QueryCustom(filter, opts...) + require.NoError(t, err) + require.NotEmpty(t, iterator) + + expectedKeys := []string{"Document2", "Document3"} + expectedValues := [][]byte{valuesToStore[1], valuesToStore[2]} + + verifyExpectedIterator2(t, iterator, expectedKeys, expectedValues, nil, 2, true) + }) +} + +func testDocumentReplacementAndMarshalling(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + t.Run("Put method - switching between wrapped data types", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + mongoDBStore, ok := store.(*mongodb.Store) + require.True(t, ok) + + type testStruct struct { + FieldNameWithCapitalLetters string `json:"fieldNameWithCapitalLetters"` + FieldNameWithOmitEmpty string `json:"fieldNameWithOmitEmpty,omitempty"` + } + + testStruct1 := testStruct{ + FieldNameWithCapitalLetters: "SomeValue", + FieldNameWithOmitEmpty: "SomeOtherValue", + } + + testStruct1Bytes, err := json.Marshal(testStruct1) + require.NoError(t, err) + + err = mongoDBStore.Put("TestKey", testStruct1Bytes) + require.NoError(t, err) + + testStruct1.FieldNameWithOmitEmpty = "" + + err = mongoDBStore.Put("TestKey", []byte{1, 2, 3}) + require.NoError(t, err) + + valueAsMap, err := 
mongoDBStore.GetAsRawMap("TestKey") + require.NoError(t, err) + + _, found := valueAsMap["doc"] + require.False(t, found, "stored document was not replaced properly") + + _, found = valueAsMap["bin"] + require.True(t, found, "stored document was not replaced properly") + }) + t.Run("PutAsJSON - test JSON struct tags", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + mongoDBStore, ok := store.(*mongodb.Store) + require.True(t, ok) + + type testStruct struct { + FieldNameWithCapitalLetters string `json:"fieldNameWithCapitalLetters"` + FieldNameWithOmitEmpty string `json:"fieldNameWithOmitEmpty,omitempty"` + } + + testStruct1 := testStruct{ + FieldNameWithCapitalLetters: "SomeValue", + FieldNameWithOmitEmpty: "SomeOtherValue", + } + + err = mongoDBStore.PutAsJSON("TestKey", testStruct1) + require.NoError(t, err) + + testStruct1.FieldNameWithOmitEmpty = "" + + err = mongoDBStore.PutAsJSON("TestKey", testStruct1) + require.NoError(t, err) + + valueAsMap, err := mongoDBStore.GetAsRawMap("TestKey") + require.NoError(t, err) + + _, found := valueAsMap["fieldnamewithcapitalletters"] + require.False(t, found, "field name casing was not maintained") + + _, found = valueAsMap["fieldNameWithCapitalLetters"] + require.True(t, found, "field name missing") + + _, found = valueAsMap["FieldNameWithOmitEmpty"] + require.False(t, found, "empty field was not omitted") + }) +} + +func TestCreateMongoDBFindOptions(t *testing.T) { + s := &mongodb.Store{} + + t.Run("isJSONQuery = false", func(t *testing.T) { + opts := s.CreateMongoDBFindOptions([]storage.QueryOption{ + storage.WithPageSize(1000), + storage.WithInitialPageNum(10), + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "tag1", + }), + }, false) + require.NotNil(t, opts) + require.NotNil(t, opts.BatchSize) + require.Equal(t, int32(1000), *opts.BatchSize) + require.NotNil(t, opts.Skip) + require.Equal(t, 
int64(10000), *opts.Skip) + require.NotNil(t, opts.Sort) + + sortOpts, ok := opts.Sort.(primitive.D) + require.True(t, ok) + require.Len(t, sortOpts, 1) + require.Equal(t, "tags.tag1", sortOpts[0].Key) + require.Equal(t, -1, sortOpts[0].Value) + }) + + t.Run("isJSONQuery = true", func(t *testing.T) { + opts := s.CreateMongoDBFindOptions([]storage.QueryOption{ + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "tag1", + }), + }, true) + require.NotNil(t, opts) + require.NotNil(t, opts.Sort) + + sortOpts, ok := opts.Sort.(primitive.D) + require.True(t, ok) + require.Len(t, sortOpts, 1) + require.Equal(t, "tag1", sortOpts[0].Key) + require.Equal(t, 1, sortOpts[0].Value) + }) +} + +func testBulkWrite(t *testing.T, connString string) { + t.Helper() + + provider, err := mongodb.NewProvider(connString) + require.NoError(t, err) + + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + mongoDBStore, ok := store.(*mongodb.Store) + require.True(t, ok) + + type testDataType struct { + StringValue string `json:"stringValue,omitempty"` + IntValue int `json:"intValue,omitempty"` + } + + data1 := testDataType{ + StringValue: "String1", + IntValue: 1, + } + + preparedData1, err := mongodb.PrepareDataForBSONStorage(data1) + require.NoError(t, err) + + data2 := testDataType{ + StringValue: "String2", + IntValue: 2, + } + + preparedData2, err := mongodb.PrepareDataForBSONStorage(data2) + require.NoError(t, err) + + models := []mongo.WriteModel{ + mongo.NewInsertOneModel().SetDocument(preparedData1), + mongo.NewInsertOneModel().SetDocument(preparedData2), + } + + err = mongoDBStore.BulkWrite(models) + require.NoError(t, err) + + filter := bson.D{ + { + Key: "$or", + Value: bson.A{ + bson.D{{Key: "stringValue", Value: "String1"}}, + bson.D{{Key: "stringValue", Value: "String2"}}, + }, + }, + } + + iterator, err := mongoDBStore.QueryCustom(filter) + require.NoError(t, err) + + more, err := 
iterator.Next() + require.NoError(t, err) + require.True(t, more) + + retrievedData1, err := iterator.ValueAsRawMap() + require.NoError(t, err) + require.Equal(t, "String1", retrievedData1["stringValue"]) + require.Equal(t, int64(1), retrievedData1["intValue"]) + + more, err = iterator.Next() + require.NoError(t, err) + require.True(t, more) + + retrievedData2, err := iterator.ValueAsRawMap() + require.NoError(t, err) + require.Equal(t, "String2", retrievedData2["stringValue"]) + require.Equal(t, int64(2), retrievedData2["intValue"]) + + more, err = iterator.Next() + require.NoError(t, err) + require.False(t, more) + + data1New := testDataType{ + StringValue: "String1_New", + IntValue: 11, + } + + preparedData1New, err := mongodb.PrepareDataForBSONStorage(data1New) + require.NoError(t, err) + + data2New := testDataType{ + StringValue: "String2_New", + IntValue: 22, + } + + preparedData2New, err := mongodb.PrepareDataForBSONStorage(data2New) + require.NoError(t, err) + + models = []mongo.WriteModel{ + mongo.NewReplaceOneModel().SetFilter(bson.M{"stringValue": "String1"}).SetReplacement(preparedData1New), + mongo.NewReplaceOneModel().SetFilter(bson.M{"stringValue": "String2"}).SetReplacement(preparedData2New), + } + + err = mongoDBStore.BulkWrite(models) + require.NoError(t, err) + + filter = bson.D{ + { + Key: "$or", + Value: bson.A{ + bson.D{{Key: "stringValue", Value: "String1_New"}}, + bson.D{{Key: "stringValue", Value: "String2_New"}}, + }, + }, + } + + iterator, err = mongoDBStore.QueryCustom(filter) + require.NoError(t, err) + + more, err = iterator.Next() + require.NoError(t, err) + require.True(t, more) + + retrievedData1New, err := iterator.ValueAsRawMap() + require.NoError(t, err) + require.Equal(t, "String1_New", retrievedData1New["stringValue"]) + require.Equal(t, int64(11), retrievedData1New["intValue"]) + + more, err = iterator.Next() + require.NoError(t, err) + require.True(t, more) + + retrievedData2New, err := iterator.ValueAsRawMap() + 
require.NoError(t, err) + require.Equal(t, "String2_New", retrievedData2New["stringValue"]) + require.Equal(t, int64(22), retrievedData2New["intValue"]) + + more, err = iterator.Next() + require.NoError(t, err) + require.False(t, more) + + models = []mongo.WriteModel{ + mongo.NewDeleteOneModel().SetFilter(bson.M{"stringValue": "String1_New"}), + mongo.NewDeleteOneModel().SetFilter(bson.M{"stringValue": "DoesNotExist"}), + } + + err = mongoDBStore.BulkWrite(models) + require.NoError(t, err) + + filter = bson.D{ + { + Key: "$or", + Value: bson.A{ + bson.D{{Key: "stringValue", Value: "String1_New"}}, + bson.D{{Key: "stringValue", Value: "String2_New"}}, + }, + }, + } + + iterator, err = mongoDBStore.QueryCustom(filter) + require.NoError(t, err) + + more, err = iterator.Next() + require.NoError(t, err) + require.True(t, more) + + retrievedData2NewAgain, err := iterator.ValueAsRawMap() + require.NoError(t, err) + require.Equal(t, "String2_New", retrievedData2NewAgain["stringValue"]) + require.Equal(t, int64(22), retrievedData2NewAgain["intValue"]) + + more, err = iterator.Next() + require.NoError(t, err) + require.False(t, more) +} + +func getTestData() (testKeys []string, testValues [][]byte, testTags [][]storage.Tag) { + testKeys = []string{ + "Cassie", + "Luna", + "Miku", + "Amber", + "Brandy", + } + + testValues = [][]byte{ + []byte("is a big, young dog"), + []byte("is a small dog"), + []byte("is a fluffy dog (also small)"), + []byte("is a big, old dog"), + []byte("is a big dog of unknown age (but probably old)"), + } + + testTags = [][]storage.Tag{ + { + {Name: "Breed", Value: "GoldenRetriever"}, + {Name: "Personality", Value: "Playful"}, + {Name: "NumLegs", Value: "4"}, + {Name: "EarType", Value: "Floppy"}, + {Name: "Nickname", Value: "Miss"}, + {Name: "Age", Value: "2"}, + }, + { + {Name: "Breed", Value: "Schweenie"}, + {Name: "Personality", Value: "Shy"}, + {Name: "NumLegs", Value: "4"}, + {Name: "EarType", Value: "Pointy"}, + {Name: "Age", Value: "1"}, + }, + { 
+ {Name: "Breed", Value: "Pomchi"}, + {Name: "Personality", Value: "Outgoing"}, + {Name: "NumLegs", Value: "4"}, + {Name: "EarType", Value: "Pointy"}, + {Name: "Nickname", Value: "Fluffball"}, + {Name: "Age", Value: "1"}, + }, + { + {Name: "Breed", Value: "GoldenRetriever"}, + {Name: "Personality", Value: "Calm"}, + {Name: "NumLegs", Value: "4"}, + {Name: "EarType", Value: "Floppy"}, + {Name: "Age", Value: "14"}, + }, + { + {Name: "Breed", Value: "GoldenRetriever"}, + {Name: "Personality", Value: "Calm"}, + {Name: "NumLegs", Value: "4"}, + {Name: "EarType", Value: "Floppy"}, + }, + } + + return testKeys, testValues, testTags +} + +func createCustomIndexForCustomQueryTests(t *testing.T, provider *mongodb.Provider, storeName string) { + t.Helper() + + indexOptions := options.Index() + indexOptions.SetName("Custom Index") + + model := mongo.IndexModel{ + Keys: bson.D{ + {Key: "indexed.attributes.name", Value: 1}, + {Key: "indexed.attributes.value", Value: 1}, + }, + Options: indexOptions, + } + + err := provider.CreateCustomIndexes(storeName, model) + require.NoError(t, err) +} + +func storeDataWithoutWrapping(t *testing.T, store *mongodb.Store, values [][]byte, useBatch bool) { + t.Helper() + + if useBatch { + insertOneModels := make([]mongo.WriteModel, len(values)) + + for i, value := range values { + var valueAsMap map[string]interface{} + + err := json.Unmarshal(value, &valueAsMap) + require.NoError(t, err) + + preparedData, err := mongodb.PrepareDataForBSONStorage(valueAsMap) + require.NoError(t, err) + + preparedData["_id"] = fmt.Sprintf("Document%d", i+1) + + insertOneModels[i] = mongo.NewInsertOneModel().SetDocument(preparedData) + } + + err := store.BulkWrite(insertOneModels) + require.NoError(t, err) + + return + } + + for i, value := range values { + var valueAsMap map[string]interface{} + + err := json.Unmarshal(value, &valueAsMap) + require.NoError(t, err) + + err = store.PutAsJSON(fmt.Sprintf("Document%d", i+1), valueAsMap) + require.NoError(t, err) + } 
+} + +func generateJSONTestData() [][]byte { + value1Bytes := []byte(`{ + "sequence": 0, + "indexed": [{ + "sequence": 0, + "hmac": { + "id": "https://example.com/kms/z7BgF536GaR", + "type": "Sha256HmacKey2019" + }, + "attributes": [{ + "name": "Name1", + "value": "Value1" + }, { + "name": "Name2", + "value": "Value1", + "anotherField": "Something" + }, + { + "name": "Name3", + "value": "Value1", + "aBoolField": true + }] + }], + "jwe": { + "protected": "eyJlbmMiOiJDMjBQIn0", + "recipients": [ + { + "header": { + "alg": "A256KW", + "kid": "https://example.com/kms/z7BgF536GaR" + }, + "encrypted_key": + "OR1vdCNvf_B68mfUxFQVT-vyXVrBembuiM40mAAjDC1-Qu5iArDbug" + } + ], + "iv": "i8Nins2vTI3PlrYW", + "ciphertext": "Cb-963UCXblINT8F6MDHzMJN9EAhK3I", + "tag": "pfZO0JulJcrc3trOZy8rjA" + } + }`) + + value2Bytes := []byte(`{ + "sequence": 0, + "indexed": [{ + "sequence": 0, + "hmac": { + "id": "https://example.com/kms/z7BgF536GaR", + "type": "Sha256HmacKey2019" + }, + "attributes": [{ + "name": "Name1", + "value": "Value2" + }, { + "name": "Name2", + "value": "Value2" + }] + }], + "jwe": { + "protected": "eyJlbmMiOiJDMjBQIn0", + "recipients": [ + { + "header": { + "alg": "A256KW", + "kid": "https://example.com/kms/z7BgF536GaR" + }, + "encrypted_key": + "OR1vdCNvf_B68mfUxFQVT-vyXVrBembuiM40mAAjDC1-Qu5iArDbug" + } + ], + "iv": "i8Nins2vTI3PlrYW", + "ciphertext": "Cb-963UCXblINT8F6MDHzMJN9EAhK3I", + "tag": "pfZO0JulJcrc3trOZy8rjA" + } + }`) + + value3Bytes := []byte(`{ + "sequence": 0, + "indexed": [{ + "sequence": 0, + "hmac": { + "id": "https://example.com/kms/z7BgF536GaR", + "type": "Sha256HmacKey2019" + }, + "attributes": [{ + "name": "Name4", + "value": "Value1" + }] + }], + "jwe": { + "protected": "eyJlbmMiOiJDMjBQIn0", + "recipients": [ + { + "header": { + "alg": "A256KW", + "kid": "https://example.com/kms/z7BgF536GaR" + }, + "encrypted_key": + "OR1vdCNvf_B68mfUxFQVT-vyXVrBembuiM40mAAjDC1-Qu5iArDbug" + } + ], + "iv": "i8Nins2vTI3PlrYW", + "ciphertext": 
"Cb-963UCXblINT8F6MDHzMJN9EAhK3I", + "tag": "pfZO0JulJcrc3trOZy8rjA" + } + }`) + + return [][]byte{value1Bytes, value2Bytes, value3Bytes} +} + +// expectedKeys, expectedValues, and expectedTags are with respect to the query's page settings. +// Since iterator.TotalItems' count is not affected by page settings, expectedTotalItemsCount must be passed in and +// can't be determined by looking at the length of expectedKeys, expectedValues, nor expectedTags. +func verifyExpectedIterator2(t *testing.T, actualResultsItr storage.Iterator, expectedKeys []string, + expectedValues [][]byte, expectedTags [][]storage.Tag, expectedTotalItemsCount int, isCustomQueryTest bool) { + t.Helper() + + if !isCustomQueryTest && (len(expectedValues) != len(expectedKeys) || len(expectedTags) != len(expectedKeys)) { + require.FailNow(t, + "Invalid test case. Expected keys, values and tags slices must be the same length.") + } + + verifyIteratorAnyOrder2(t, actualResultsItr, expectedKeys, expectedValues, expectedTags, expectedTotalItemsCount, + isCustomQueryTest) +} + +func verifyIteratorAnyOrder2(t *testing.T, actualResultsItr storage.Iterator, //nolint:gocognit, gocyclo // test file + expectedKeys []string, expectedValues [][]byte, expectedTags [][]storage.Tag, expectedTotalItemsCount int, + isCustomQueryTest bool) { + t.Helper() + + var dataChecklist struct { + keys []string + values [][]byte + tags [][]storage.Tag + received []bool + } + + dataChecklist.keys = expectedKeys + dataChecklist.values = expectedValues + dataChecklist.tags = expectedTags + dataChecklist.received = make([]bool, len(expectedKeys)) + + moreResultsToCheck, err := actualResultsItr.Next() + require.NoError(t, err) + + if !moreResultsToCheck && len(expectedKeys) != 0 { + require.FailNow(t, "query unexpectedly returned no results") + } + + for moreResultsToCheck { + dataReceivedCount := 0 + + for _, received := range dataChecklist.received { + if received { + dataReceivedCount++ + } + } + + if dataReceivedCount == 
len(dataChecklist.received) { + require.FailNow(t, "iterator contains more results than expected") + } + + var itrErr error + receivedKey, itrErr := actualResultsItr.Key() + require.NoError(t, itrErr) + + var receivedValueMap map[string]interface{} + + var receivedValue []byte + + if isCustomQueryTest { + actualMongoDBResultsItr, ok := actualResultsItr.(mongodb.Iterator) + require.True(t, ok) + + receivedValueMap, itrErr = actualMongoDBResultsItr.ValueAsRawMap() + require.NoError(t, itrErr) + } else { + receivedValue, itrErr = actualResultsItr.Value() + require.NoError(t, itrErr) + } + + receivedTags, itrErr := actualResultsItr.Tags() + require.NoError(t, itrErr) + + for i := 0; i < len(dataChecklist.keys); i++ { + if receivedKey == dataChecklist.keys[i] { //nolint:nestif // test file + if isCustomQueryTest { + if isEquivalentJSON(t, receivedValueMap, dataChecklist.values[i]) { + dataChecklist.received[i] = true + + break + } + } else { + if string(receivedValue) == string(dataChecklist.values[i]) { + if equalTags(receivedTags, dataChecklist.tags[i]) { + dataChecklist.received[i] = true + + break + } + } + } + } + } + + moreResultsToCheck, err = actualResultsItr.Next() + require.NoError(t, err) + } + + count, errTotalItems := actualResultsItr.TotalItems() + require.NoError(t, errTotalItems) + require.Equal(t, expectedTotalItemsCount, count) + + err = actualResultsItr.Close() + require.NoError(t, err) + + for _, received := range dataChecklist.received { + if !received { + require.FailNow(t, "received unexpected query results") + } + } +} + +func isEquivalentJSON(t *testing.T, receivedMapFromMongoDB map[string]interface{}, originalValue []byte) bool { + t.Helper() + + var originalValueAsMap map[string]interface{} + + err := json.Unmarshal(originalValue, &originalValueAsMap) + require.NoError(t, err) + + // Add the _id field that we expect MongoDB to add so the two are comparable. + // Then, remarshal so we can compare to see if we got equivalent JSON. 
+ + originalValueAsMap["_id"] = receivedMapFromMongoDB["_id"] + + remarshalledOriginalValue, err := json.Marshal(originalValueAsMap) + require.NoError(t, err) + + remarshalledReceivedValue, err := json.Marshal(receivedMapFromMongoDB) + require.NoError(t, err) + + return string(remarshalledReceivedValue) == string(remarshalledOriginalValue) +} + +func startMongoDBContainer(t *testing.T, dockerMongoDBTag string) (*dctest.Pool, *dctest.Resource) { + t.Helper() + + pool, err := dctest.NewPool("") + require.NoError(t, err) + + mongoDBResource, err := pool.RunWithOptions(&dctest.RunOptions{ + Repository: dockerMongoDBImage, + Tag: dockerMongoDBTag, + PortBindings: map[dc.Port][]dc.PortBinding{ + "27017/tcp": {{HostIP: "", HostPort: "27017"}}, + }, + }) + require.NoError(t, err) + + require.NoError(t, waitForMongoDBToBeUp()) + + return pool, mongoDBResource +} + +func waitForMongoDBToBeUp() error { + return backoff.Retry(pingMongoDB, backoff.WithMaxRetries(backoff.NewConstantBackOff(time.Second), 30)) +} + +func pingMongoDB() error { + var err error + + tM := reflect.TypeOf(bson.M{}) + reg := bson.NewRegistryBuilder().RegisterTypeMapEntry(bsontype.EmbeddedDocument, tM).Build() + clientOpts := options.Client().SetRegistry(reg).ApplyURI(mongoDBConnString) + + mongoClient, err := mongo.NewClient(clientOpts) + if err != nil { + return err + } + + err = mongoClient.Connect(context.Background()) + if err != nil { + return errors.Wrap(err, "error connecting to mongo") + } + + db := mongoClient.Database("test") + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + + return db.Client().Ping(ctx, nil) +} diff --git a/component/wallet-cli/internal/storage/mongodb/support_test.go b/component/wallet-cli/internal/storage/mongodb/support_test.go new file mode 100644 index 000000000..b0889125e --- /dev/null +++ b/component/wallet-cli/internal/storage/mongodb/support_test.go @@ -0,0 +1,3914 @@ +/* +Copyright Gen Digital Inc. All Rights Reserved. 
+SPDX-License-Identifier: Apache-2.0 +*/ +package mongodb_test + +import ( + "encoding/json" + "errors" + "testing" + + "github.com/google/uuid" + "github.com/hyperledger/aries-framework-go/spi/storage" + "github.com/stretchr/testify/require" +) + +// CheckAll tests common storage functionality. +// These tests demonstrate behaviour that is expected to be consistent across store implementations. +// Some tests can be skipped by passing in the appropriate TestOptions here. +func CheckAll(t *testing.T, provider storage.Provider) { + // Run this first so the store count is predictable. + t.Run("Provider: GetOpenStores", func(t *testing.T) { + ProviderGetOpenStores(t, provider) + }) + t.Run("Provider: open store and set/get config", func(t *testing.T) { + ProviderOpenStoreSetGetConfig(t, provider) + }) + t.Run("Store", func(t *testing.T) { + t.Run("Put and Get", func(t *testing.T) { + PutGet(t, provider) + }) + t.Run("GetTags", func(t *testing.T) { + StoreGetTags(t, provider) + }) + t.Run("GetBulk", func(t *testing.T) { + StoreGetBulk(t, provider) + }) + t.Run("Delete", func(t *testing.T) { + StoreDelete(t, provider) + }) + t.Run("Query", func(t *testing.T) { + StoreQuery(t, provider) + StoreQueryWithSortingAndInitialPageOptions(t, provider) + }) + t.Run("Batch", func(t *testing.T) { + StoreBatch(t, provider) + }) + t.Run("Flush", func(t *testing.T) { + StoreFlush(t, provider) + }) + t.Run("Close", func(t *testing.T) { + StoreClose(t, provider) + }) + }) + // Run this last since it may render the provider object unusable afterwards, depending on the implementation. + t.Run("Provider: close", func(t *testing.T) { + ProviderClose(t, provider) + }) +} + +// ProviderOpenStoreSetGetConfig tests common Provider OpenStore, SetStoreConfig, and GetStoreConfig functionality. 
+func ProviderOpenStoreSetGetConfig(t *testing.T, provider storage.Provider) { //nolint: funlen // Test file + t.Run("Set store config with all new tags", func(t *testing.T) { + testStoreName := randomStoreName() + + store, err := provider.OpenStore(testStoreName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + config := storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3"}} + + err = provider.SetStoreConfig(testStoreName, config) + require.NoError(t, err) + + retrievedConfig, err := provider.GetStoreConfig(testStoreName) + require.NoError(t, err) + require.NotNil(t, retrievedConfig) + require.True(t, equalTagNamesAnyOrder(config.TagNames, retrievedConfig.TagNames), + "Unexpected tag names") + }) + t.Run("Merge a new tag name in with existing tag names in a store config", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + // Set initial tags. + err = provider.SetStoreConfig(storeName, storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2"}}) + require.NoError(t, err) + + // Get the tags we just set, append a new one, and re-set the store configuration. + + config, err := provider.GetStoreConfig(storeName) + require.NoError(t, err) + + config.TagNames = append(config.TagNames, "tagName3") + + err = provider.SetStoreConfig(storeName, config) + require.NoError(t, err) + + // Verify that the config contains all three tags. 
+ + expectedTagNames := []string{"tagName1", "tagName2", "tagName3"} + + retrievedConfig, err := provider.GetStoreConfig(storeName) + require.NoError(t, err) + require.NotNil(t, retrievedConfig) + require.True(t, equalTagNamesAnyOrder(expectedTagNames, retrievedConfig.TagNames), + "Unexpected tag names") + }) + t.Run("Remove all existing tag names in a store config", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + // Set initial tags. + err = provider.SetStoreConfig(storeName, storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2"}}) + require.NoError(t, err) + + // Delete all existing tag names in the config by passing in an empty spi.StoreConfiguration. + err = provider.SetStoreConfig(storeName, storage.StoreConfiguration{}) + require.NoError(t, err) + + // Verify that the store config now has no tag names. + config, err := provider.GetStoreConfig(storeName) + require.NoError(t, err) + require.True(t, equalTagNamesAnyOrder(nil, config.TagNames), "Unexpected tag names") + }) + t.Run("Merge a new tag in with existing tags while deleting some too", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + // Set initial tags. + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2"}}) + require.NoError(t, err) + + // Now we want tagName1 to be removed, tagName2 to be kept, and tagName3 to be added. 
+ err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName2", "tagName3"}}) + require.NoError(t, err) + + expectedTagNames := []string{"tagName2", "tagName3"} + + // Verify that tagName1 was removed, tagName2 was kept, and tagName3 was added. + config, err := provider.GetStoreConfig(storeName) + require.NoError(t, err) + require.True(t, equalTagNamesAnyOrder(expectedTagNames, config.TagNames), "Unexpected tag names") + }) + t.Run("Attempt to set config without opening store first", func(t *testing.T) { + err := provider.SetStoreConfig("NonExistentStore", storage.StoreConfiguration{}) + require.True(t, errors.Is(err, storage.ErrStoreNotFound), "Got unexpected error or no error") + }) + t.Run("Attempt to set a config that specifies a tag name with a ':' character", func(t *testing.T) { + testStoreName := randomStoreName() + + store, err := provider.OpenStore(testStoreName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + // Tag names cannot contain any ':' characters since it's a reserved character in the query syntax. + // It would be impossible to do a query for one of these tags, so we must not allow it in the first place. 
+ config := storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagNameWith:Character"}} + + err = provider.SetStoreConfig(testStoreName, config) + require.Error(t, err) + }) + t.Run("Attempt to get config without opening store first", func(t *testing.T) { + config, err := provider.GetStoreConfig("NonExistentStore") + require.True(t, errors.Is(err, storage.ErrStoreNotFound), "Got unexpected error or no error") + require.Empty(t, config) + }) + t.Run("Attempt to open a store with a blank name", func(t *testing.T) { + store, err := provider.OpenStore("") + + require.Error(t, err) + require.Nil(t, store) + }) + t.Run("Demonstrate that store names are not case-sensitive", func(t *testing.T) { + // Per the interface, store names are not supposed to be case-sensitive in order to ensure consistency across + // storage implementations - some of which don't support case sensitivity in their database names. + + storeWithCapitalLetter, err := provider.OpenStore("Some-store-name") + require.NoError(t, err) + + // Despite the different capitalization, this should still set the store config on the store opened above. + err = provider.SetStoreConfig("SoMe-stoRe-naMe", storage.StoreConfiguration{TagNames: []string{"TagName1"}}) + require.NoError(t, err) + + // Despite the different capitalization, this should still get the store config we set above. + storeConfig, err := provider.GetStoreConfig("sOME-sToRe-NamE") + require.NoError(t, err) + + require.Len(t, storeConfig.TagNames, 1) + require.Equal(t, "TagName1", storeConfig.TagNames[0]) + + defer func() { + require.NoError(t, storeWithCapitalLetter.Close()) + }() + + err = storeWithCapitalLetter.Put("key", []byte("value")) + require.NoError(t, err) + + // If the store names are properly case-insensitive, then it's expected that the store below + // contains the same data as the one above. 
+ storeWithLowercaseLetter, err := provider.OpenStore("some-store-name") + require.NoError(t, err) + + defer func() { + require.NoError(t, storeWithLowercaseLetter.Close()) + }() + + value, err := storeWithLowercaseLetter.Get("key") + require.NoError(t, err) + require.Equal(t, "value", string(value)) + }) +} + +// ProviderGetOpenStores tests common Provider GetOpenStores functionality. +// This test assumes that the provider passed in has never had stores created under it before. +func ProviderGetOpenStores(t *testing.T, provider storage.Provider) { + // No stores have been created yet, so the slice should be empty or nil. + openStores := provider.GetOpenStores() + require.Len(t, openStores, 0) + + store1, err := provider.OpenStore("testStore1") + require.NoError(t, err) + + defer func() { + // Although we close store1 later on as part of this test, in case it fails early we still need to make + // sure it's closed. Closing a store multiple times should not cause an error. + require.NoError(t, store1.Close()) + }() + + openStores = provider.GetOpenStores() + require.Len(t, openStores, 1) + + store2, err := provider.OpenStore("testStore2") + require.NoError(t, err) + + defer func() { + // Although we close store2 later on as part of this test, in case it fails early we still need to make + // sure it's closed. Closing a store multiple times should not cause an error. + require.NoError(t, store2.Close()) + }() + + openStores = provider.GetOpenStores() + require.Len(t, openStores, 2) + + // Now we will attempt to open a previously opened store. Since it was opened previously, we expect that the + // number of open stores returned by GetOpenStores() to not change. 
+ store2Reopened, err := provider.OpenStore("testStore2")
+ require.NoError(t, err)
+
+ defer func() {
+ require.NoError(t, store2Reopened.Close())
+ }()
+
+ openStores = provider.GetOpenStores()
+ require.Len(t, openStores, 2)
+
+ // Now we will attempt to open a store with the same name as before, but different casing. Since store names are
+ // supposed to be case-insensitive, this shouldn't change the number of currently open stores.
+ _, err = provider.OpenStore("teststore2")
+ require.NoError(t, err)
+
+ openStores = provider.GetOpenStores()
+ require.Len(t, openStores, 2)
+
+ err = store1.Close()
+ require.NoError(t, err)
+
+ openStores = provider.GetOpenStores()
+ require.Len(t, openStores, 1)
+
+ err = store2.Close()
+ require.NoError(t, err)
+
+ openStores = provider.GetOpenStores()
+ require.Len(t, openStores, 0)
+}
+
+// ProviderClose tests common Provider Close functionality.
+func ProviderClose(t *testing.T, provider storage.Provider) {
+ t.Run("Success", func(t *testing.T) {
+ err := provider.Close()
+ require.NoError(t, err)
+ })
+}
+
+// PutGet tests common Store Put and Get functionality.
+func PutGet(t *testing.T, provider storage.Provider) { //nolint: funlen // Test file + testKeyNonURL := "TestKey" + testKeyURL := "https://example.com" + + testValueSimpleString := "TestValue" + testValueSimpleString2 := "TestValue2" + testBinaryData := []byte{0x5f, 0xcb, 0x5c, 0xe9, 0x7f, 0xe3, 0x81} + testBinaryData2 := []byte{0x5f, 0xcb, 0x5c, 0xe9, 0x7f} + testValueJSONString := `"TestValue"` + + t.Run("Put and get a value", func(t *testing.T) { + t.Run("Key is not a URL", func(t *testing.T) { + t.Run("Value is simple text", func(t *testing.T) { + doPutThenGetTest(t, provider, testKeyNonURL, []byte(testValueSimpleString)) + }) + t.Run("Value is JSON-formatted object", func(t *testing.T) { + doPutThenGetTestWithJSONFormattedObject(t, provider, testKeyNonURL) + }) + t.Run("Value is JSON-formatted string", func(t *testing.T) { + doPutThenGetTest(t, provider, testKeyNonURL, []byte(testValueJSONString)) + }) + t.Run("Value is binary data", func(t *testing.T) { + doPutThenGetTest(t, provider, testKeyNonURL, testBinaryData) + }) + }) + t.Run("Key is a URL", func(t *testing.T) { + t.Run("Value is simple text", func(t *testing.T) { + doPutThenGetTest(t, provider, testKeyURL, []byte(testValueSimpleString)) + }) + t.Run("Value is JSON-formatted object", func(t *testing.T) { + doPutThenGetTestWithJSONFormattedObject(t, provider, testKeyURL) + }) + t.Run("Value is JSON-formatted string", func(t *testing.T) { + doPutThenGetTest(t, provider, testKeyURL, []byte(testValueJSONString)) + }) + t.Run("Value is binary data", func(t *testing.T) { + doPutThenGetTest(t, provider, testKeyURL, testBinaryData) + }) + }) + }) + t.Run("Put a value, update it, and get the updated value", func(t *testing.T) { + t.Run("Key is not a URL", func(t *testing.T) { + t.Run("Value is simple text", func(t *testing.T) { + doPutThenUpdateThenGetTest(t, provider, testKeyNonURL, + []byte(testValueSimpleString), []byte(testValueSimpleString2)) + }) + t.Run("Value is JSON-formatted object", func(t 
*testing.T) { + doPutThenUpdateThenGetTestWithJSONFormattedObject(t, provider, testKeyNonURL) + }) + t.Run("Value is binary data", func(t *testing.T) { + doPutThenUpdateThenGetTest(t, provider, testKeyNonURL, testBinaryData, testBinaryData2) + }) + }) + t.Run("Key is a URL", func(t *testing.T) { + t.Run("Value is simple text", func(t *testing.T) { + doPutThenUpdateThenGetTest(t, provider, testKeyURL, []byte(testValueSimpleString), + []byte(testValueSimpleString2)) + }) + t.Run("Value is JSON-formatted object", func(t *testing.T) { + doPutThenUpdateThenGetTestWithJSONFormattedObject(t, provider, testKeyURL) + }) + t.Run("Value is binary data", func(t *testing.T) { + doPutThenUpdateThenGetTest(t, provider, testKeyURL, testBinaryData, testBinaryData2) + }) + }) + }) + t.Run("Put a single value, then delete it, then put again using the same key", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + err = store.Delete(testKeyNonURL) + require.NoError(t, err) + + err = store.Put(testKeyNonURL, []byte("TestValue2")) + require.NoError(t, err) + + value, err := store.Get(testKeyNonURL) + require.NoError(t, err) + require.Equal(t, "TestValue2", string(value)) + }) + t.Run("Tests demonstrating proper store namespacing", func(t *testing.T) { + t.Run("Put key + value in one store, "+ + "then check that it can't be found in a second store with a different name", func(t *testing.T) { + store1, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store1.Close()) + }() + + err = store1.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + store2, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store2.Close()) + }() + + // Store 2 should 
be disjoint from store 1. It should not contain the key + value pair from store 1. + value, err := store2.Get(testKeyNonURL) + require.True(t, errors.Is(err, storage.ErrDataNotFound), "Got unexpected error or no error") + require.Nil(t, value) + }) + t.Run("Put same key + value in two stores with different names, then update value in one store, "+ + "then check that the other store was not changed", + func(t *testing.T) { + store1, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store1.Close()) + }() + + err = store1.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + store2, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store2.Close()) + }() + + err = store2.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + // Now both store 1 and 2 contain the same key + value pair. + + newTestValue := testValueSimpleString + "_new" + + // Now update the value in only store 1. + err = store1.Put(testKeyNonURL, []byte(newTestValue)) + require.NoError(t, err) + + // Store 1 should have the new value. + value, err := store1.Get(testKeyNonURL) + require.NoError(t, err) + require.Equal(t, newTestValue, string(value)) + + // Store 2 should still have the old value. 
+ value, err = store2.Get(testKeyNonURL) + require.NoError(t, err) + require.Equal(t, testValueSimpleString, string(value)) + }) + t.Run("Put same key + value in two stores with different names, then delete value in one store, "+ + "then check that the other store still has its key+value pair intact", + func(t *testing.T) { + store1, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store1.Close()) + }() + + err = store1.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + store2, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store2.Close()) + }() + + err = store2.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + // Now both store 1 and 2 contain the same key + value pair. + + // Now delete the key + value pair in only store 1. + err = store1.Delete(testKeyNonURL) + require.NoError(t, err) + + // Store 1 should no longer have the key + value pair. + value, err := store1.Get(testKeyNonURL) + require.True(t, errors.Is(err, storage.ErrDataNotFound), "Got unexpected error or no error") + require.Nil(t, value) + + // Store 2 should still have the key + value pair. + value, err = store2.Get(testKeyNonURL) + require.NoError(t, err) + require.Equal(t, testValueSimpleString, string(value)) + }) + t.Run("Put same key + value in two stores with the same name (so they should point to the same "+ + "underlying databases), then update value in one store, then check that the other store also reflects this", + func(t *testing.T) { + storeName := randomStoreName() + + store1, err := provider.OpenStore(storeName) + require.NoError(t, err) + + defer func() { + require.NoError(t, store1.Close()) + }() + + err = store1.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + // Store 2 should contain the same data as store 1 since they were opened with the same name. 
+ store2, err := provider.OpenStore(storeName) + require.NoError(t, err) + + defer func() { + require.NoError(t, store2.Close()) + }() + + // Store 2 should find the same data that was put in store 1 + + valueFromStore1, err := store1.Get(testKeyNonURL) + require.NoError(t, err) + + valueFromStore2, err := store2.Get(testKeyNonURL) + require.NoError(t, err) + + require.Equal(t, string(valueFromStore1), string(valueFromStore2)) + }) + t.Run("Put same key + value in two stores with the same name (so they should point to the same "+ + "underlying databases), then delete value in one store, then check that the other store also reflects this", + func(t *testing.T) { + storeName := randomStoreName() + + store1, err := provider.OpenStore(storeName) + require.NoError(t, err) + + defer func() { + require.NoError(t, store1.Close()) + }() + + err = store1.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + // Store 2 should contain the same data as store 1 since they were opened with the same name. + store2, err := provider.OpenStore(storeName) + require.NoError(t, err) + + defer func() { + require.NoError(t, store2.Close()) + }() + + err = store2.Put(testKeyNonURL, []byte(testValueSimpleString)) + require.NoError(t, err) + + // Now both store 1 and 2 contain the same key + value pair. + + // Now delete the key + value pair in store 1. + err = store1.Delete(testKeyNonURL) + require.NoError(t, err) + + // Both store 1 and store 2 should no longer have the key + value pair. 
+ value, err := store1.Get(testKeyNonURL)
+ require.True(t, errors.Is(err, storage.ErrDataNotFound), "Got unexpected error or no error")
+ require.Nil(t, value)
+
+ value, err = store2.Get(testKeyNonURL)
+ require.True(t, errors.Is(err, storage.ErrDataNotFound), "Got unexpected error or no error")
+ require.Nil(t, value)
+ })
+ })
+ t.Run("Get using empty key", func(t *testing.T) {
+ store, err := provider.OpenStore(randomStoreName())
+ require.NoError(t, err)
+
+ defer func() {
+ require.NoError(t, store.Close())
+ }()
+
+ _, err = store.Get("")
+ require.Error(t, err)
+ })
+ t.Run("Put with empty key", func(t *testing.T) {
+ store, err := provider.OpenStore(randomStoreName())
+ require.NoError(t, err)
+
+ defer func() {
+ require.NoError(t, store.Close())
+ }()
+
+ err = store.Put("", []byte(testValueSimpleString))
+ require.Error(t, err)
+ })
+ t.Run("Put with nil value", func(t *testing.T) {
+ store, err := provider.OpenStore(randomStoreName())
+ require.NoError(t, err)
+
+ defer func() {
+ require.NoError(t, store.Close())
+ }()
+
+ err = store.Put(testKeyNonURL, nil)
+ require.Error(t, err)
+ })
+ t.Run("Put with tag containing a ':' character", func(t *testing.T) {
+ t.Run("First tag name contains a ':'", func(t *testing.T) {
+ store, err := provider.OpenStore(randomStoreName())
+ require.NoError(t, err)
+
+ defer func() {
+ require.NoError(t, store.Close())
+ }()
+
+ err = store.Put(testKeyNonURL, []byte("value"),
+ []storage.Tag{
+ {Name: "TagName1With:Character", Value: "TagValue1"},
+ {Name: "TagName2", Value: "TagValue2"},
+ }...)
+ require.Error(t, err)
+ })
+ t.Run("First tag value contains a ':'", func(t *testing.T) {
+ store, err := provider.OpenStore(randomStoreName())
+ require.NoError(t, err)
+
+ defer func() {
+ require.NoError(t, store.Close())
+ }()
+
+ err = store.Put(testKeyNonURL, []byte("value"),
+ []storage.Tag{
+ {Name: "TagName1", Value: "TagValue1With:Character"},
+ {Name: "TagName2", Value: "TagValue2"},
+ }...)
+ require.Error(t, err) + }) + t.Run("Second tag name contains a ':'", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put(testKeyNonURL, []byte("value"), + []storage.Tag{ + {Name: "TagName1", Value: "TagValue1"}, + {Name: "TagName2With:Character", Value: "TagValue2"}, + }...) + require.Error(t, err) + }) + t.Run("Second tag value contains a ':'", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put(testKeyNonURL, []byte("value"), + []storage.Tag{ + {Name: "TagName1", Value: "TagValue1"}, + {Name: "TagName2", Value: "TagValue2With:Character"}, + }...) + require.Error(t, err) + }) + }) +} + +// StoreGetTags tests common Store GetTags functionality. +func StoreGetTags(t *testing.T, provider storage.Provider) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2"}}) + require.NoError(t, err) + + t.Run("Successfully retrieve tags", func(t *testing.T) { + // For storage providers that support sorting, they may store numbers differently in order to allow them to + // sort correctly as per the storage interface documentation. + // These tests ensure that the tag values are still retrieved + t.Run("Tag values are strings", func(t *testing.T) { + tags := []storage.Tag{{Name: "tagName1", Value: "tagValue1"}, {Name: "tagName2", Value: "tagValue2"}} + + key := "key" + + err = store.Put(key, []byte("value1"), tags...) 
+ require.NoError(t, err) + + receivedTags, errGetTags := store.GetTags(key) + require.NoError(t, errGetTags) + require.True(t, equalTags(tags, receivedTags), "Got unexpected tags") + }) + t.Run("Tag values are decimal numbers", func(t *testing.T) { + tags := []storage.Tag{{Name: "tagName1", Value: "1"}, {Name: "tagName2", Value: "2"}} + + key := "key2" + + err = store.Put(key, []byte("value1"), tags...) + require.NoError(t, err) + + receivedTags, errGetTags := store.GetTags(key) + require.NoError(t, errGetTags) + require.True(t, equalTags(tags, receivedTags), "Got unexpected tags") + }) + }) + t.Run("Data not found", func(t *testing.T) { + tags, err := store.GetTags("NonExistentKey") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "Got unexpected error or no error") + require.Empty(t, tags) + }) + t.Run("Empty key", func(t *testing.T) { + tags, err := store.GetTags("") + require.Error(t, err) + require.Empty(t, tags) + }) +} + +// StoreGetBulk tests common Store GetBulk functionality. +func StoreGetBulk(t *testing.T, provider storage.Provider) { //nolint: funlen // Test file + t.Run("All values found", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put("key1", []byte("value1"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) + require.NoError(t, err) + + err = store.Put("key2", []byte(`"value2"`), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) 
+ require.NoError(t, err) + + values, err := store.GetBulk("key1", "key2") + require.NoError(t, err) + require.Len(t, values, 2) + require.Equal(t, "value1", string(values[0])) + require.Equal(t, `"value2"`, string(values[1])) + }) + t.Run("Two values found, one not", func(t *testing.T) { + t.Run("Value not found was the second one", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put("key1", []byte("value1"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) + require.NoError(t, err) + + err = store.Put("key2", []byte("value2"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) + require.NoError(t, err) + + values, err := store.GetBulk("key1", "nonexistentkey", "key2") + require.NoError(t, err) + + require.Len(t, values, 3) + require.Equal(t, "value1", string(values[0])) + require.Nil(t, values[1]) + require.Equal(t, "value2", string(values[2])) + }) + t.Run("Value not found was the third one", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put("key1", []byte("value1"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) + require.NoError(t, err) + + err = store.Put("key2", []byte("value2"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) 
+ require.NoError(t, err) + + values, err := store.GetBulk("key1", "key2", "nonexistentkey") + require.NoError(t, err) + + require.Len(t, values, 3) + require.Equal(t, "value1", string(values[0])) + require.Equal(t, "value2", string(values[1])) + require.Nil(t, values[2]) + }) + }) + t.Run("One value found, one not because it was deleted", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put("key1", []byte("value1"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) + require.NoError(t, err) + + err = store.Put("key2", []byte("value2"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) + require.NoError(t, err) + + err = store.Delete("key2") + require.NoError(t, err) + + values, err := store.GetBulk("key1", "key2") + require.NoError(t, err) + require.Len(t, values, 2) + require.Equal(t, "value1", string(values[0])) + require.Nil(t, values[1]) + }) + t.Run("No values found", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put("key1", []byte("value1"), + []storage.Tag{ + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + }...) + require.NoError(t, err) + + values, err := store.GetBulk("key3", "key4") + require.NoError(t, err) + require.Len(t, values, 2) + require.Nil(t, values[0]) + require.Nil(t, values[1]) + }) + t.Run("Nil keys slice", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + values, err := store.GetBulk(nil...) 
+ require.Error(t, err) + require.Nil(t, values) + }) + t.Run("Empty keys slice", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + values, err := store.GetBulk(make([]string, 0)...) + require.Error(t, err) + require.Nil(t, values) + }) + t.Run("Third key is empty", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + values, err := store.GetBulk("key1", "key2", "") + require.Error(t, err) + require.Nil(t, values) + }) +} + +// StoreDelete tests common Store Delete functionality. +func StoreDelete(t *testing.T, provider storage.Provider) { + t.Run("Delete a stored key", func(t *testing.T) { + const testKey = "key" + + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put(testKey, []byte("value1")) + require.NoError(t, err) + + err = store.Delete(testKey) + require.NoError(t, err) + + value, err := store.Get(testKey) + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Empty(t, value) + }) + t.Run("Delete a key that doesn't exist (not considered an error)", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Delete("NonExistentKey") + require.NoError(t, err) + }) + t.Run("Delete with blank key argument", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Delete("") + require.Error(t, err) + }) +} + +// StoreQuery tests common Store Query functionality. 
+func StoreQuery(t *testing.T, provider storage.Provider) { + doStoreQueryTests(t, provider, false) + doStoreQueryTests(t, provider, true) +} + +// StoreQueryWithSortingAndInitialPageOptions tests common Store Query functionality when the sorting and initial +// page options are used. +func StoreQueryWithSortingAndInitialPageOptions(t *testing.T, provider storage.Provider) { + + doStoreQueryWithSortingAndInitialPageOptionsTests(t, provider, false) + + doStoreQueryWithSortingAndInitialPageOptionsTests(t, provider, true) +} + +// StoreBatch tests common Store Batch functionality. +func StoreBatch(t *testing.T, provider storage.Provider) { // nolint:funlen // Test file + t.Run("Success: put three new values", func(t *testing.T) { + doBatchTestPutThreeValues(t, provider, false) + doBatchTestPutThreeValues(t, provider, true) + }) + t.Run("Success: update three different previously-stored values", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{ + "tagName1", "tagName2", "tagName3", + "tagName2_new", "tagName3_new", + }}) + require.NoError(t, err) + + err = store.Put("key1", []byte("value1"), []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}...) + require.NoError(t, err) + + err = store.Put("key2", []byte("value2"), []storage.Tag{{Name: "tagName2", Value: "tagValue2"}}...) + require.NoError(t, err) + + err = store.Put("key3", []byte("value3"), []storage.Tag{{Name: "tagName3", Value: "tagValue3"}}...) 
+ require.NoError(t, err) + + key1UpdatedTagsToStore := []storage.Tag{{Name: "tagName1"}} + key2UpdatedTagsToStore := []storage.Tag{{Name: "tagName2_new", Value: "tagValue2"}} + key3UpdatedTagsToStore := []storage.Tag{{Name: "tagName3_new", Value: "tagValue3_new"}} + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1_new"), Tags: key1UpdatedTagsToStore}, + {Key: "key2", Value: []byte("value2_new"), Tags: key2UpdatedTagsToStore}, + {Key: "key3", Value: []byte("value3_new"), Tags: key3UpdatedTagsToStore}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure all values and tags were stored + + value, err := store.Get("key1") + require.NoError(t, err) + require.Equal(t, "value1_new", string(value)) + retrievedTags, err := store.GetTags("key1") + require.True(t, equalTags(key1UpdatedTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key2") + require.NoError(t, err) + require.Equal(t, "value2_new", string(value)) + retrievedTags, err = store.GetTags("key2") + require.True(t, equalTags(key2UpdatedTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key3") + require.NoError(t, err) + require.Equal(t, "value3_new", string(value)) + retrievedTags, err = store.GetTags("key3") + require.True(t, equalTags(key3UpdatedTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + }) + t.Run("Success: delete three different previously-stored values", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3"}}) + require.NoError(t, err) + + err = store.Put("key1", []byte("value1"), []storage.Tag{{Name: "tagName1", 
Value: "tagValue1"}}...)
+		require.NoError(t, err)
+
+		err = store.Put("key2", []byte("value2"), []storage.Tag{{Name: "tagName2", Value: "tagValue2"}}...)
+		require.NoError(t, err)
+
+		err = store.Put("key3", []byte("value3"), []storage.Tag{{Name: "tagName3", Value: "tagValue3"}}...)
+		require.NoError(t, err)
+
+		operations := []storage.Operation{
+			{Key: "key1", Value: nil, Tags: nil},
+			{Key: "key2", Value: nil, Tags: nil},
+			{Key: "key3", Value: nil, Tags: nil},
+		}
+
+		err = store.Batch(operations)
+		require.NoError(t, err)
+
+		// Check and make sure the values can't be found now
+
+		value, err := store.Get("key1")
+		require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error")
+		require.Nil(t, value)
+		tags, err := store.GetTags("key1")
+		require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error")
+		require.Nil(t, tags)
+
+		value, err = store.Get("key2")
+		require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error")
+		require.Nil(t, value)
+		tags, err = store.GetTags("key2")
+		require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error")
+		require.Nil(t, tags)
+
+		value, err = store.Get("key3")
+		require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error")
+		require.Nil(t, value)
+		tags, err = store.GetTags("key3")
+		require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error")
+		require.Nil(t, tags)
+	})
+	t.Run("Success: put one value, update one value, delete one value", func(t *testing.T) {
+		// Run both the false and true variants, matching every other doBatchTest* subtest.
+		// (The original called the false variant twice, so the true variant was never exercised.)
+		doBatchTestPutOneUpdateOneDeleteOne(t, provider, false)
+		doBatchTestPutOneUpdateOneDeleteOne(t, provider, true)
+	})
+	t.Run("Success: delete three values, only two of which were previously-stored", func(t *testing.T) {
+		storeName := randomStoreName()
+
+		store, err := provider.OpenStore(storeName)
+		require.NoError(t, err)
+		require.NotNil(t, store)
+
+		defer func() {
+			
require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3"}}) + require.NoError(t, err) + + err = store.Put("key1", []byte("value1"), []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}...) + require.NoError(t, err) + + err = store.Put("key3", []byte("value3"), []storage.Tag{{Name: "tagName3", Value: "tagValue3"}}...) + require.NoError(t, err) + + operations := []storage.Operation{ + {Key: "key1", Value: nil, Tags: nil}, + {Key: "key5", Value: []byte("whatever"), Tags: nil}, + {Key: "key2", Value: nil, Tags: nil}, // key2 doesn't exist in the store, but this should not cause an error + {Key: "key3", Value: nil, Tags: nil}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure the values can't be found now + + value, err := store.Get("key1") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, value) + tags, err := store.GetTags("key1") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, tags) + + value, err = store.Get("key3") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, value) + tags, err = store.GetTags("key3") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, tags) + }) + t.Run("Success: put value and then delete it in the same Batch call", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1"}}) + require.NoError(t, err) + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1"), Tags: 
[]storage.Tag{{Name: "tagName1", Value: "tagValue1"}}}, + {Key: "key1", Value: nil, Tags: nil}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure that the delete effectively "overrode" the put in the Batch call. + + value, err := store.Get("key1") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, value) + tags, err := store.GetTags("key1") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, tags) + }) + t.Run("Success: put value and update it in the same Batch call", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3"}}) + require.NoError(t, err) + + updatedTagsToStore := []storage.Tag{{Name: "tagName2", Value: "tagValue2"}} + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1"), Tags: []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}}, + {Key: "key1", Value: []byte("value2"), Tags: updatedTagsToStore}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure that the second put operation effectively "overrode" the first operation + // from the user's perspective. 
+ + value, err := store.Get("key1") + require.NoError(t, err) + require.Equal(t, "value2", string(value)) + retrievedTags, err := store.GetTags("key1") + require.True(t, equalTags(updatedTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + }) + t.Run("Success: update previously-stored value and delete it in the same Batch call", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{ + "tagName1", "tagName2", "tagName3", + "tagName2_new", "tagName3_new", + }}) + require.NoError(t, err) + + err = store.Put("key1", []byte("value1"), []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}...) + require.NoError(t, err) + + key1UpdatedTagsToStore := []storage.Tag{{Name: "tagName1"}} + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1_new"), Tags: key1UpdatedTagsToStore}, + {Key: "key1"}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure the value can't be found now + + value, err := store.Get("key1") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, value) + tags, err := store.GetTags("key1") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, tags) + }) + t.Run("Success: update previously-stored value, then delete it, "+ + "then put it in again using the same key from the first operation, "+ + "all in the same Batch call", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + 
storage.StoreConfiguration{TagNames: []string{ + "tagName1", "tagName2", "tagName3", + "tagName2_new", "tagName3_new", + }}) + require.NoError(t, err) + + err = store.Put("key1", []byte("value1"), []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}...) + require.NoError(t, err) + + key1UpdatedTagsToStore := []storage.Tag{{Name: "tagName1"}} + key1SecondUpdatedTagsToStore := []storage.Tag{{Name: "tagName2"}} + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1_new"), Tags: key1UpdatedTagsToStore}, + {Key: "key1"}, + {Key: "key1", Value: []byte("value1_new2"), Tags: key1SecondUpdatedTagsToStore}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure that the third operation effectively "overrode" the first two + // from the user's perspective. + + value, err := store.Get("key1") + require.NoError(t, err) + require.Equal(t, "value1_new2", string(value)) + retrievedTags, err := store.GetTags("key1") + require.True(t, equalTags(key1SecondUpdatedTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + }) + t.Run("Success: put values in one batch call, then delete in a second batch call, then put again using "+ + "the same keys that were used in the first batch call in a third batch call", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3"}}) + require.NoError(t, err) + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1"), Tags: []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}}, + {Key: "key2", Value: []byte("value2"), Tags: []storage.Tag{{Name: "tagName2", Value: "tagValue2"}}}, + {Key: "key3", Value: []byte("value3"), Tags: []storage.Tag{{Name: "tagName3", Value: 
"tagValue3"}}}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + operations = []storage.Operation{ + {Key: "key1", Value: nil}, + {Key: "key2", Value: nil}, + {Key: "key3", Value: nil}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + key1FinalTagsToStore := []storage.Tag{{Name: "tagName1_new", Value: "tagValue1_new"}} + key2FinalTagsToStore := []storage.Tag{{Name: "tagName2_new", Value: "tagValue2_new"}} + key3FinalTagsToStore := []storage.Tag{{Name: "tagName3_new", Value: "tagValue3_new"}} + + operations = []storage.Operation{ + {Key: "key1", Value: []byte("value1_new"), Tags: key1FinalTagsToStore}, + {Key: "key2", Value: []byte("value2_new"), Tags: key2FinalTagsToStore}, + {Key: "key3", Value: []byte("value3_new"), Tags: key3FinalTagsToStore}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure the new values were stored + + value, err := store.Get("key1") + require.NoError(t, err) + require.Equal(t, "value1_new", string(value)) + retrievedTags, err := store.GetTags("key1") + require.True(t, equalTags(key1FinalTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key2") + require.NoError(t, err) + require.Equal(t, "value2_new", string(value)) + retrievedTags, err = store.GetTags("key2") + require.True(t, equalTags(key2FinalTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key3") + require.NoError(t, err) + require.Equal(t, "value3_new", string(value)) + retrievedTags, err = store.GetTags("key3") + require.True(t, equalTags(key3FinalTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + }) + t.Run("Failure: Operations slice is nil", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Batch(nil) + 
require.Error(t, err) + }) + t.Run("Failure: Operations slice is empty", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Batch([]storage.Operation{}) + require.Error(t, err) + }) + t.Run("Failure: Operation has an empty key", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1"), Tags: []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}}, + {Key: "", Value: []byte("value2"), Tags: []storage.Tag{{Name: "tagName2", Value: "tagValue2"}}}, + } + + err = store.Batch(operations) + require.Error(t, err) + }) +} + +// StoreFlush tests common Store Flush functionality. +func StoreFlush(t *testing.T, provider storage.Provider) { + t.Run("Success", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put("key1", []byte("value1")) + require.NoError(t, err) + + err = store.Put("key2", []byte("value2")) + require.NoError(t, err) + + err = store.Flush() + require.NoError(t, err) + + values, err := store.GetBulk("key1", "key2") + require.NoError(t, err) + require.Len(t, values, 2) + require.Equal(t, "value1", string(values[0])) + require.Equal(t, "value2", string(values[1])) + }) +} + +// StoreClose tests common Store Close functionality. 
+func StoreClose(t *testing.T, provider storage.Provider) { + t.Run("Successfully close store", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + err = store.Close() + require.NoError(t, err) + }) + t.Run("Close same store multiple times without error", func(t *testing.T) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + require.NotNil(t, store) + + err = store.Close() + require.NoError(t, err) + + err = store.Close() + require.NoError(t, err) + + err = store.Close() + require.NoError(t, err) + }) +} + +func doPutThenGetTest(t *testing.T, provider storage.Provider, key string, value []byte) { + store, err := provider.OpenStore(randomStoreName()) + require.NoError(t, err) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = store.Put(key, value) + require.NoError(t, err) + + retrievedValue, err := store.Get(key) + require.NoError(t, err) + require.Equal(t, value, retrievedValue) +} + +type testStruct struct { + String string `json:"string"` + + Test1Bool bool `json:"test1Bool"` + Test2Bool bool `json:"test2Bool"` + + BigNegativeInt32 int32 `json:"bigNegativeInt32"` + SmallNegativeInt32 int32 `json:"smallNegativeInt32"` + ZeroInt32 int32 `json:"zeroInt32"` + SmallPositiveInt32 int32 `json:"smallPositiveInt32"` + BigPositiveInt32 int32 `json:"bigPositiveInt32"` + + BigNegativeInt64 int64 `json:"bigNegativeInt64"` + SmallNegativeInt64 int64 `json:"smallNegativeInt64"` + ZeroInt64 int64 `json:"zeroInt64"` + SmallPositiveInt64 int64 `json:"smallPositiveInt64"` + BigPositiveInt64 int64 `json:"bigPositiveInt64"` + + Test1Float32 float32 `json:"test1Float32"` + Test2Float32 float32 `json:"test2Float32"` + Test3Float32 float32 `json:"test3Float32"` + Test4Float32 float32 `json:"test4Float32"` + Test5Float32 float32 `json:"test5Float32"` + ZeroFloat32 float32 `json:"zeroFloat32"` + + Test1Float64 float64 `json:"test1Float64"` + Test2Float64 
float64 `json:"test2Float64"`
+	Test3Float64 float64 `json:"test3Float64"`
+	Test4Float64 float64 `json:"test4Float64"`
+	Test5Float64 float64 `json:"test5Float64"` // was float32: the name and json tag say this field holds a float64
+	ZeroFloat64  float64 `json:"zeroFloat64"`
+}
+
+// doPutThenGetTestWithJSONFormattedObject stores a JSON-marshalled testStruct and
+// verifies that the value retrieved via Get unmarshals back to the same field values.
+func doPutThenGetTestWithJSONFormattedObject(t *testing.T, provider storage.Provider, key string) {
+	store, err := provider.OpenStore(randomStoreName())
+	require.NoError(t, err)
+
+	defer func() {
+		require.NoError(t, store.Close())
+	}()
+
+	storedTestData := storeTestJSONData(t, store, key)
+
+	retrievedValue, err := store.Get(key)
+	require.NoError(t, err)
+
+	checkIfTestStructsMatch(t, retrievedValue, &storedTestData)
+}
+
+// doPutThenUpdateThenGetTest stores a value, overwrites it under the same key, and
+// verifies that Get returns the updated value.
+func doPutThenUpdateThenGetTest(t *testing.T, provider storage.Provider, key string, value, updatedValue []byte) {
+	store, err := provider.OpenStore(randomStoreName())
+	require.NoError(t, err)
+
+	defer func() {
+		require.NoError(t, store.Close())
+	}()
+
+	err = store.Put(key, value)
+	require.NoError(t, err)
+
+	err = store.Put(key, updatedValue)
+	require.NoError(t, err)
+
+	retrievedValue, err := store.Get(key)
+	require.NoError(t, err)
+	require.Equal(t, updatedValue, retrievedValue)
+}
+
+// doPutThenUpdateThenGetTestWithJSONFormattedObject stores a JSON object, mutates
+// several of its fields, stores it again under the same key, and verifies that the
+// updated object round-trips through the store.
+func doPutThenUpdateThenGetTestWithJSONFormattedObject(t *testing.T, provider storage.Provider, key string) {
+	store, err := provider.OpenStore(randomStoreName())
+	require.NoError(t, err)
+
+	defer func() {
+		require.NoError(t, store.Close())
+	}()
+
+	storedTestData := storeTestJSONData(t, store, key)
+
+	storedTestData.String = "Some new string here"
+	storedTestData.Test1Bool = true
+	storedTestData.BigNegativeInt32 = -12345 //nolint:gomnd // Test file
+	storedTestData.BigPositiveInt64 = 90000004
+	storedTestData.Test3Float32 = 7.42
+	storedTestData.Test3Float64 = -72.4208 //nolint:gomnd // Test file
+
+	testDataBytes, err := json.Marshal(storedTestData)
+	require.NoError(t, err)
+
+	err = store.Put(key, testDataBytes)
+	require.NoError(t, err)
+
+	retrievedValue, err := store.Get(key)
+	require.NoError(t, err)
+
+	
checkIfTestStructsMatch(t, retrievedValue, &storedTestData) +} + +func storeTestJSONData(t *testing.T, store storage.Store, key string) testStruct { + testData := testStruct{ + String: "Some string here", + + Test1Bool: false, + Test2Bool: true, + + BigNegativeInt32: -2147483648, + SmallNegativeInt32: -3, + ZeroInt32: 0, + SmallPositiveInt32: 3, //nolint:gomnd // Test file + BigPositiveInt32: 2147483647, //nolint:gomnd // Test file + + BigNegativeInt64: -9223372036854775808, + SmallNegativeInt64: -3, + ZeroInt64: 0, + SmallPositiveInt64: 3, //nolint:gomnd // Test file + BigPositiveInt64: 9223372036854775807, //nolint:gomnd // Test file + + Test1Float32: 1.3, + Test2Float32: 16, //nolint:gomnd // Test file + Test3Float32: 1.5869797, + Test4Float32: 239.902, //nolint:gomnd // Test file + Test5Float32: -239.902, + ZeroFloat32: 0.00, //nolint:gomnd // Test file + + Test1Float64: 0.12345678912345678, //nolint:gomnd // Test file + Test2Float64: -478.875321, + Test3Float64: 123456789, //nolint:gomnd // Test file + Test4Float64: 1.00000004, + Test5Float64: -239.902, + ZeroFloat64: 0.0000, //nolint:gomnd // Test file + } + + testDataBytes, err := json.Marshal(testData) + require.NoError(t, err) + + err = store.Put(key, testDataBytes) + require.NoError(t, err) + + return testData +} + +func checkIfTestStructsMatch(t *testing.T, retrievedValue []byte, storedTestData *testStruct) { + var retrievedTestData testStruct + + err := json.Unmarshal(retrievedValue, &retrievedTestData) + require.NoError(t, err) + + require.Equal(t, storedTestData.String, retrievedTestData.String) + + require.Equal(t, storedTestData.Test1Bool, retrievedTestData.Test1Bool) + require.Equal(t, storedTestData.Test2Bool, retrievedTestData.Test2Bool) + + require.Equal(t, storedTestData.BigNegativeInt32, retrievedTestData.BigNegativeInt32) + require.Equal(t, storedTestData.SmallNegativeInt32, retrievedTestData.SmallNegativeInt32) + require.Equal(t, storedTestData.ZeroInt32, retrievedTestData.ZeroInt32) + 
require.Equal(t, storedTestData.SmallPositiveInt32, retrievedTestData.SmallPositiveInt32)
+	require.Equal(t, storedTestData.BigPositiveInt32, retrievedTestData.BigPositiveInt32)
+
+	require.Equal(t, storedTestData.BigNegativeInt64, retrievedTestData.BigNegativeInt64)
+	require.Equal(t, storedTestData.SmallNegativeInt64, retrievedTestData.SmallNegativeInt64)
+	require.Equal(t, storedTestData.ZeroInt64, retrievedTestData.ZeroInt64)
+	require.Equal(t, storedTestData.SmallPositiveInt64, retrievedTestData.SmallPositiveInt64)
+	require.Equal(t, storedTestData.BigPositiveInt64, retrievedTestData.BigPositiveInt64)
+
+	require.Equal(t, storedTestData.Test1Float32, retrievedTestData.Test1Float32)
+	require.Equal(t, storedTestData.Test2Float32, retrievedTestData.Test2Float32)
+	require.Equal(t, storedTestData.Test3Float32, retrievedTestData.Test3Float32)
+	require.Equal(t, storedTestData.Test4Float32, retrievedTestData.Test4Float32)
+	// Test5Float32/Test5Float64 were missing from the original comparison, so two
+	// stored fields were silently never verified.
+	require.Equal(t, storedTestData.Test5Float32, retrievedTestData.Test5Float32)
+	require.Equal(t, storedTestData.ZeroFloat32, retrievedTestData.ZeroFloat32)
+
+	require.Equal(t, storedTestData.Test1Float64, retrievedTestData.Test1Float64)
+	require.Equal(t, storedTestData.Test2Float64, retrievedTestData.Test2Float64)
+	require.Equal(t, storedTestData.Test3Float64, retrievedTestData.Test3Float64)
+	require.Equal(t, storedTestData.Test4Float64, retrievedTestData.Test4Float64)
+	require.Equal(t, storedTestData.Test5Float64, retrievedTestData.Test5Float64)
+	require.Equal(t, storedTestData.ZeroFloat64, retrievedTestData.ZeroFloat64)
+}
+
+func doStoreQueryTests(t *testing.T, // nolint: funlen,gocognit,gocyclo // Test file
+	provider storage.Provider, setStoreConfig bool) {
+	t.Run("Tag name only query - 2 values found", func(t *testing.T) {
+		keysToPut := []string{"key1", "key2", "key3"}
+		valuesToPut := [][]byte{[]byte("value1"), []byte("value2"), []byte("value3")}
+		tagsToPut := [][]storage.Tag{
+			{{Name: "tagName1", Value: "tagValue1"}, {Name: "tagName2", Value: "tagValue2"}},
+			{{Name: "tagName3", Value: "tagValue"}, {Name: "tagName4"}},
+			{{Name: "tagName3", Value: "tagValue2"}},
+		}
+
+		expectedKeys 
:= []string{keysToPut[1], keysToPut[2]} + expectedValues := [][]byte{valuesToPut[1], valuesToPut[2]} + expectedTags := [][]storage.Tag{tagsToPut[1], tagsToPut[2]} + expectedTotalItemsCount := 2 + + queryExpression := "tagName3" + + t.Run("Default page setting", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + iterator, err := store.Query(queryExpression) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + t.Run("Page size 2", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + //nolint:gomnd // Test file + iterator, err := store.Query(queryExpression, storage.WithPageSize(2)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + t.Run("Page size 1", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + 
storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + iterator, err := store.Query(queryExpression, storage.WithPageSize(1)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + t.Run("Page size 100", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + //nolint:gomnd // Test file + iterator, err := store.Query(queryExpression, storage.WithPageSize(100)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + }) + t.Run("Tag name only query - 0 values found", func(t *testing.T) { + keysToPut := []string{"key1", "key2", "key3"} + valuesToPut := [][]byte{[]byte("value1"), []byte("value2"), []byte("value3")} + tagsToPut := [][]storage.Tag{ + {{Name: "tagName1", Value: "tagValue1"}, {Name: "tagName2", Value: "tagValue2"}}, + {{Name: "tagName3", Value: "tagValue"}, {Name: "tagName4"}}, + {{Name: "tagName3", Value: "tagValue2"}}, + } + + expectedTotalItemsCount := 0 + + queryExpression := "tagName5" + + t.Run("Default page setting", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + 
storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4", "tagName5"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + iterator, err := store.Query(queryExpression) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, false, + true, expectedTotalItemsCount) + }) + t.Run("Page size 2", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4", "tagName5"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + //nolint:gomnd // Test file + iterator, err := store.Query(queryExpression, storage.WithPageSize(2)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, + false, true, expectedTotalItemsCount) + }) + t.Run("Page size 1", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4", "tagName5"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + iterator, err := store.Query(queryExpression, storage.WithPageSize(1)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, false, + true, expectedTotalItemsCount) + }) + t.Run("Page size 100", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer 
func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4", "tagName5"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + //nolint:gomnd // Test file + iterator, err := store.Query(queryExpression, storage.WithPageSize(100)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, false, + true, expectedTotalItemsCount) + }) + }) + t.Run("Tag name and value query - 2 values found", func(t *testing.T) { + keysToPut := []string{"key1", "key2", "key3", "key4"} + valuesToPut := [][]byte{[]byte("value1"), []byte("value2"), []byte("value3"), []byte("value4")} + tagsToPut := [][]storage.Tag{ + {{Name: "tagName1", Value: "tagValue1"}, {Name: "tagName2", Value: "tagValue2"}}, + {{Name: "tagName3", Value: "tagValue1"}, {Name: "tagName4"}}, + {{Name: "tagName3", Value: "tagValue2"}}, + {{Name: "tagName3", Value: "tagValue1"}}, + } + + expectedKeys := []string{keysToPut[1], keysToPut[3]} + expectedValues := [][]byte{valuesToPut[1], valuesToPut[3]} + expectedTags := [][]storage.Tag{tagsToPut[1], tagsToPut[3]} + expectedTotalItemsCount := 2 + + queryExpression := "tagName3:tagValue1" + + t.Run("Default page setting", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + iterator, err := store.Query(queryExpression) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, 
expectedTotalItemsCount) + }) + t.Run("Page size 2", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + //nolint:gomnd // Test file + iterator, err := store.Query(queryExpression, storage.WithPageSize(2)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + t.Run("Page size 1", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + iterator, err := store.Query(queryExpression, storage.WithPageSize(1)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + t.Run("Page size 100", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + 
//nolint:gomnd // Test file + iterator, err := store.Query(queryExpression, storage.WithPageSize(100)) + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + }) + t.Run("Tag name and value query - only 1 value found "+ + "(would have been two, but the other was deleted before the query was executed)", func(t *testing.T) { + keysToPut := []string{"key1", "key2", "key3", "key4"} + valuesToPut := [][]byte{[]byte("value1"), []byte("value2"), []byte("value3"), []byte("value4")} + tagsToPut := [][]storage.Tag{ + {{Name: "tagName1", Value: "tagValue1"}, {Name: "tagName2", Value: "tagValue2"}}, + {{Name: "tagName3", Value: "tagValue1"}, {Name: "tagName4"}}, + {{Name: "tagName3", Value: "tagValue2"}}, + {{Name: "tagName3", Value: "tagValue1"}}, + } + + expectedKeys := []string{keysToPut[3]} + expectedValues := [][]byte{valuesToPut[3]} + expectedTags := [][]storage.Tag{tagsToPut[3]} + expectedTotalItemsCount := 1 + + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + putData(t, store, keysToPut, valuesToPut, tagsToPut) + + err = store.Delete("key2") + require.NoError(t, err) + + iterator, err := store.Query("tagName3:tagValue1") + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, false, + true, expectedTotalItemsCount) + }) + t.Run("Tag name and value query - 0 values found since the store is empty", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + 
require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3", "tagName4"}}) + require.NoError(t, err) + } + + iterator, err := store.Query("tagName3:tagValue1") + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, false, + true, 0) + }) + t.Run("Invalid expression formats", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, storage.StoreConfiguration{}) + require.NoError(t, err) + } + + t.Run("Empty expression", func(t *testing.T) { + iterator, err := store.Query("") + require.Error(t, err) + require.Empty(t, iterator) + }) + t.Run("Too many colon-separated parts", func(t *testing.T) { + iterator, err := store.Query("name:value:somethingElse") + require.Error(t, err) + require.Empty(t, iterator) + }) + }) +} + +func doStoreQueryWithSortingAndInitialPageOptionsTests(t *testing.T, // nolint: funlen // Test file + provider storage.Provider, setStoreConfig bool) { + t.Run("Sorting by a small numerical tag", func(t *testing.T) { //nolint: dupl // Test file + keysToPutAscendingOrder := []string{ + "key1", "key2", "key3", "key4", "key5", "key6", + "key7", "key8", "key9", "key10", + } + valuesToPutAscendingOrder := [][]byte{ + []byte("value1"), []byte("value2"), []byte("value3"), []byte("value4"), []byte("value5"), []byte("value6"), + []byte("value7"), []byte("value8"), []byte("value9"), []byte("value10"), + } + + // The tag value associated with "numberTag" will determine the sort order. 
+ tagsToPutAscendingOrder := [][]storage.Tag{ + { + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + {Name: "numberTag", Value: "1"}, + }, + { + {Name: "tagName1", Value: "tagValue2"}, + {Name: "tagName2"}, + {Name: "numberTag", Value: "2"}, + }, + { + {Name: "tagName1", Value: "tagValue3"}, + {Name: "numberTag", Value: "4"}, + }, + { + {Name: "tagName1", Value: "tagValue4"}, + {Name: "numberTag", Value: "8"}, + }, + { + {Name: "tagName1", Value: "tagValue5"}, + {Name: "numberTag", Value: "10"}, + }, + { + {Name: "tagName1", Value: "tagValue6"}, + {Name: "numberTag", Value: "11"}, + }, + { + {Name: "tagName1", Value: "tagValue7"}, + {Name: "numberTag", Value: "12"}, + }, + { + {Name: "tagName1", Value: "tagValue8"}, + {Name: "numberTag", Value: "20"}, + }, + { + {Name: "tagName1", Value: "tagValue9"}, + {Name: "numberTag", Value: "21"}, + }, + { + {Name: "tagName1", Value: "tagValue10"}, + {Name: "numberTag", Value: "22"}, + }, + } + + storeConfig := storage.StoreConfiguration{TagNames: []string{ + "tagName1", "tagName2", "tagName3", "tagName4", + "numberTag", + }} + + queryExpression := "tagName1" + + expectedTotalItemsCount := 10 + + t.Run("Data inserted in ascending order", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, storeConfig) + require.NoError(t, err) + } + + putData(t, store, keysToPutAscendingOrder, valuesToPutAscendingOrder, tagsToPutAscendingOrder) + + t.Run("Ascending order", func(t *testing.T) { //nolint: dupl // Test file + // The results should be sorted numerically (and not lexicographically) on the tag values associated + // with "numberTag". The order should go from the smallest number to the biggest. 
+ t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + })) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + 
storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[3], keysToPutAscendingOrder[4], keysToPutAscendingOrder[5], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[4], valuesToPutAscendingOrder[5], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[4], tagsToPutAscendingOrder[5], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page (but there should only be four pages max, "+ + "so 
iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + t.Run("Descending order", func(t *testing.T) { + // The results should be sorted numerically (and not lexicographically) on the tag values associated + // with "numberTag". The order should go from the biggest number to the smallest. + t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + querySortOption := storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }) + + iterator, err := store.Query(queryExpression, querySortOption) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], 
tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + require.NoError(t, 
err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + 
tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page(but there should only be four pages max, "+ + "so iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + }) + t.Run("Data inserted in arbitrary order", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, 
err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, storeConfig) + require.NoError(t, err) + } + + keysToPutArbitraryOrder := []string{ + keysToPutAscendingOrder[5], keysToPutAscendingOrder[1], keysToPutAscendingOrder[9], + keysToPutAscendingOrder[0], keysToPutAscendingOrder[4], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[2], keysToPutAscendingOrder[8], keysToPutAscendingOrder[6], + keysToPutAscendingOrder[3], + } + valuesToPutArbitraryOrder := [][]byte{ + valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[1], valuesToPutAscendingOrder[9], + valuesToPutAscendingOrder[0], valuesToPutAscendingOrder[4], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[6], + valuesToPutAscendingOrder[3], + } + tagsToPutArbitraryOrder := [][]storage.Tag{ + tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[1], tagsToPutAscendingOrder[9], + tagsToPutAscendingOrder[0], tagsToPutAscendingOrder[4], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[6], + tagsToPutAscendingOrder[3], + } + + putData(t, store, keysToPutArbitraryOrder, valuesToPutArbitraryOrder, tagsToPutArbitraryOrder) + + t.Run("Ascending order", func(t *testing.T) { //nolint: dupl // Test file + // The results should be sorted numerically (and not lexicographically) on the tag values associated + // with "numberTag". The order should go from the smallest number to the biggest. 
+ t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + })) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + 
storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[3], keysToPutAscendingOrder[4], keysToPutAscendingOrder[5], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[4], valuesToPutAscendingOrder[5], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[4], tagsToPutAscendingOrder[5], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page(but there should only be four pages max, "+ + "so 
iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + t.Run("Descending order", func(t *testing.T) { + // The results should be sorted numerically (and not lexicographically) on the tag values associated with + // "numberTag". The order should go from the biggest number to the smallest. + t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + querySortOption := storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }) + + iterator, err := store.Query(queryExpression, querySortOption) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], 
tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + 
require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + 
tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page"+ + "(but there should only be four pages max, so iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + }) + }) + t.Run("Sorting by a large numerical tag (Unix timestamps)", func(t *testing.T) { //nolint: dupl // Test file + keysToPutAscendingOrder := []string{ + "key1", 
"key2", "key3", "key4", "key5", "key6", + "key7", "key8", "key9", "key10", + } + valuesToPutAscendingOrder := [][]byte{ + []byte("value1"), []byte("value2"), []byte("value3"), []byte("value4"), []byte("value5"), []byte("value6"), + []byte("value7"), []byte("value8"), []byte("value9"), []byte("value10"), + } + + // The tag value associated with "numberTag" will determine the sort order. + tagsToPutAscendingOrder := [][]storage.Tag{ + { + {Name: "tagName1", Value: "tagValue1"}, + {Name: "tagName2", Value: "tagValue2"}, + {Name: "numberTag", Value: "0"}, + }, + { + {Name: "tagName1", Value: "tagValue2"}, + {Name: "tagName2"}, + {Name: "numberTag", Value: "1234"}, + }, + { + {Name: "tagName1", Value: "tagValue3"}, + {Name: "numberTag", Value: "140000"}, + }, + { + {Name: "tagName1", Value: "tagValue4"}, + {Name: "numberTag", Value: "1000000000"}, + }, + { + {Name: "tagName1", Value: "tagValue5"}, + {Name: "numberTag", Value: "1619022042"}, + }, + { + {Name: "tagName1", Value: "tagValue6"}, + {Name: "numberTag", Value: "1619022043"}, + }, + { + {Name: "tagName1", Value: "tagValue7"}, + {Name: "numberTag", Value: "1619022044"}, + }, + { + {Name: "tagName1", Value: "tagValue8"}, + {Name: "numberTag", Value: "1619122040"}, + }, + { + {Name: "tagName1", Value: "tagValue9"}, + {Name: "numberTag", Value: "1619122041"}, + }, + { + {Name: "tagName1", Value: "tagValue10"}, + {Name: "numberTag", Value: "92147483647"}, + }, + } + + storeConfig := storage.StoreConfiguration{TagNames: []string{ + "tagName1", "tagName2", "tagName3", "tagName4", "numberTag", + }} + + queryExpression := "tagName1" + + expectedTotalItemsCount := 10 + + t.Run("Data inserted in ascending order", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, storeConfig) + 
require.NoError(t, err) + } + + putData(t, store, keysToPutAscendingOrder, valuesToPutAscendingOrder, tagsToPutAscendingOrder) + + t.Run("Ascending order", func(t *testing.T) { //nolint: dupl // Test file + // The results should be sorted numerically (and not lexicographically) on the tag values associated + // with "numberTag". The order should go from the smallest number to the biggest. + t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + })) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := 
valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[3], keysToPutAscendingOrder[4], keysToPutAscendingOrder[5], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[4], valuesToPutAscendingOrder[5], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[4], tagsToPutAscendingOrder[5], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], 
valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page(but there should only be four pages max, "+ + "so iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + t.Run("Descending order", func(t *testing.T) { + // The results should be sorted numerically (and not lexicographically) on the tag values associated with + // "numberTag". The order should go from the biggest number to the smallest. 
+ t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + querySortOption := storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }) + + iterator, err := store.Query(queryExpression, querySortOption) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], 
keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + 
tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + 
keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page(but there should only be four pages max, "+ + "so iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + }) + t.Run("Data inserted in arbitrary order", func(t *testing.T) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + if setStoreConfig { + err = provider.SetStoreConfig(storeName, storeConfig) + require.NoError(t, err) + } + + keysToPutArbitraryOrder := []string{ + keysToPutAscendingOrder[5], keysToPutAscendingOrder[1], keysToPutAscendingOrder[9], + keysToPutAscendingOrder[0], keysToPutAscendingOrder[4], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[2], keysToPutAscendingOrder[8], keysToPutAscendingOrder[6], + keysToPutAscendingOrder[3], + } + valuesToPutArbitraryOrder := [][]byte{ + valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[1], valuesToPutAscendingOrder[9], + valuesToPutAscendingOrder[0], 
valuesToPutAscendingOrder[4], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[6], + valuesToPutAscendingOrder[3], + } + tagsToPutArbitraryOrder := [][]storage.Tag{ + tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[1], tagsToPutAscendingOrder[9], + tagsToPutAscendingOrder[0], tagsToPutAscendingOrder[4], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[6], + tagsToPutAscendingOrder[3], + } + + putData(t, store, keysToPutArbitraryOrder, valuesToPutArbitraryOrder, tagsToPutArbitraryOrder) + + t.Run("Ascending order", func(t *testing.T) { //nolint: dupl // Test file + // The results should be sorted numerically (and not lexicographically) on the tag values associated + // with "numberTag". The order should go from the smallest number to the biggest. + t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + })) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, 
expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + require.NoError(t, err) + + expectedKeys := keysToPutAscendingOrder + expectedValues := valuesToPutAscendingOrder + expectedTags := tagsToPutAscendingOrder + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[3], keysToPutAscendingOrder[4], keysToPutAscendingOrder[5], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[4], valuesToPutAscendingOrder[5], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[4], tagsToPutAscendingOrder[5], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + 
storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[7], keysToPutAscendingOrder[8], + keysToPutAscendingOrder[9], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[7], valuesToPutAscendingOrder[8], + valuesToPutAscendingOrder[9], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[7], tagsToPutAscendingOrder[8], + tagsToPutAscendingOrder[9], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page(but there should only be four pages max, "+ + "so iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortAscending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + t.Run("Descending order", func(t *testing.T) { + // The results should be sorted numerically (and not lexicographically) on the tag values associated + // with "numberTag". The order should go from the biggest number to the smallest. 
+ t.Run("Default page size setting", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + querySortOption := storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }) + + iterator, err := store.Query(queryExpression, querySortOption) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + }) + t.Run("Page size 3", func(t *testing.T) { + t.Run("Start at the default (first) page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], 
keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at first page (explicitly set)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(0)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[9], keysToPutAscendingOrder[8], keysToPutAscendingOrder[7], + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[9], valuesToPutAscendingOrder[8], valuesToPutAscendingOrder[7], + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + 
tagsToPutAscendingOrder[9], tagsToPutAscendingOrder[8], tagsToPutAscendingOrder[7], + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at second page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(1)) + require.NoError(t, err) + + expectedKeys := []string{ + keysToPutAscendingOrder[6], keysToPutAscendingOrder[5], keysToPutAscendingOrder[4], + keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[6], valuesToPutAscendingOrder[5], valuesToPutAscendingOrder[4], + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[6], tagsToPutAscendingOrder[5], tagsToPutAscendingOrder[4], + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at third page", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(2)) // nolint: gomnd // Test file + require.NoError(t, err) + + expectedKeys := []string{ + 
keysToPutAscendingOrder[3], keysToPutAscendingOrder[2], keysToPutAscendingOrder[1], + keysToPutAscendingOrder[0], + } + expectedValues := [][]byte{ + valuesToPutAscendingOrder[3], valuesToPutAscendingOrder[2], valuesToPutAscendingOrder[1], + valuesToPutAscendingOrder[0], + } + expectedTags := [][]storage.Tag{ + tagsToPutAscendingOrder[3], tagsToPutAscendingOrder[2], tagsToPutAscendingOrder[1], + tagsToPutAscendingOrder[0], + } + + verifyExpectedIterator(t, iterator, expectedKeys, expectedValues, expectedTags, true, + true, expectedTotalItemsCount) + }) + t.Run("Start at fifth page(but there should only be four pages max, "+ + "so iterator should have no results)", func(t *testing.T) { + iterator, err := store.Query(queryExpression, + storage.WithSortOrder(&storage.SortOptions{ + Order: storage.SortDescending, + TagName: "numberTag", + }), + storage.WithPageSize(3), // nolint: gomnd // Test file + storage.WithInitialPageNum(4)) // nolint: gomnd // Test file + require.NoError(t, err) + + verifyExpectedIterator(t, iterator, nil, nil, nil, true, + true, expectedTotalItemsCount) + }) + }) + }) + }) + }) +} + +func doBatchTestPutThreeValues(t *testing.T, provider storage.Provider, useNewKeyOptimization bool) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3"}}) + require.NoError(t, err) + + key1TagsToStore := []storage.Tag{{Name: "tagName1"}} + key2TagsToStore := []storage.Tag{{Name: "tagName2"}} + key3TagsToStore := []storage.Tag{{Name: "tagName3"}} + + putOptions := &storage.PutOptions{IsNewKey: useNewKeyOptimization} + + operations := []storage.Operation{ + {Key: "key1", Value: []byte("value1"), Tags: key1TagsToStore, PutOptions: putOptions}, + {Key: "key2", Value: []byte(`{"field":"value"}`), Tags: 
key2TagsToStore, PutOptions: putOptions}, + {Key: "key3", Value: []byte(`"value3"`), Tags: key3TagsToStore, PutOptions: putOptions}, + } + + err = store.Batch(operations) + require.NoError(t, err) + + // Check and make sure all values and tags were stored + + value, err := store.Get("key1") + require.NoError(t, err) + require.Equal(t, "value1", string(value)) + + retrievedTags, err := store.GetTags("key1") + require.True(t, equalTags(key1TagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key2") + require.NoError(t, err) + require.Equal(t, `{"field":"value"}`, string(value)) + + retrievedTags, err = store.GetTags("key2") + require.True(t, equalTags(key2TagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key3") + require.NoError(t, err) + require.Equal(t, `"value3"`, string(value)) + + retrievedTags, err = store.GetTags("key3") + require.True(t, equalTags(key3TagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) +} + +func doBatchTestPutOneUpdateOneDeleteOne(t *testing.T, provider storage.Provider, useNewKeyOptimization bool) { + storeName := randomStoreName() + + store, err := provider.OpenStore(storeName) + require.NoError(t, err) + require.NotNil(t, store) + + defer func() { + require.NoError(t, store.Close()) + }() + + err = provider.SetStoreConfig(storeName, + storage.StoreConfiguration{TagNames: []string{"tagName1", "tagName2", "tagName3"}}) + require.NoError(t, err) + + err = store.Put("key1", []byte("value1"), []storage.Tag{{Name: "tagName1", Value: "tagValue1"}}...) + require.NoError(t, err) + + err = store.Put("key2", []byte("value2"), []storage.Tag{{Name: "tagName2", Value: "tagValue2"}}...) 
+ require.NoError(t, err) + + key3TagsToStore := []storage.Tag{{Name: "tagName3", Value: "tagValue3"}} + + key1UpdatedTagsToStore := []storage.Tag{{Name: "tagName1"}} + + putOptions := &storage.PutOptions{IsNewKey: useNewKeyOptimization} + + operations := []storage.Operation{ + {Key: "key3", Value: []byte("value3"), Tags: key3TagsToStore, PutOptions: putOptions}, // Put + {Key: "key1", Value: []byte("value1_new"), Tags: key1UpdatedTagsToStore}, // Update + {Key: "key2", Value: nil, Tags: nil}, // Delete + } + + err = store.Batch(operations) + require.NoError(t, err) + + value, err := store.Get("key3") + require.NoError(t, err) + require.Equal(t, "value3", string(value)) + + retrievedTags, err := store.GetTags("key3") + require.True(t, equalTags(key3TagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key1") + require.NoError(t, err) + require.Equal(t, "value1_new", string(value)) + + retrievedTags, err = store.GetTags("key1") + require.True(t, equalTags(key1UpdatedTagsToStore, retrievedTags), "Got unexpected tags") + require.NoError(t, err) + + value, err = store.Get("key2") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, value) + + retrievedTags, err = store.GetTags("key2") + require.True(t, errors.Is(err, storage.ErrDataNotFound), "got unexpected error or no error") + require.Nil(t, retrievedTags) +} + +func randomStoreName() string { + return "store-" + uuid.NewString() +} + +func putData(t *testing.T, store storage.Store, keys []string, values [][]byte, tags [][]storage.Tag) { + t.Helper() + + for i := 0; i < len(keys); i++ { + err := store.Put(keys[i], values[i], tags[i]...) + require.NoError(t, err) + } +} + +// expectedKeys, expectedValues, and expectedTags are with respect to the query's page settings. 
+// Since Iterator.TotalItems' count is not affected by page settings, expectedTotalItemsCount must be passed in and +// can't be determined by looking at the length of expectedKeys, expectedValues, nor expectedTags. +func verifyExpectedIterator(t *testing.T, actualResultsItr storage.Iterator, expectedKeys []string, expectedValues [][]byte, + expectedTags [][]storage.Tag, orderMatters, checkTotalItemsCount bool, expectedTotalItemsCount int) { + if len(expectedValues) != len(expectedKeys) || len(expectedTags) != len(expectedKeys) { + require.FailNow(t, + "Invalid test case. Expected keys, values and tags slices must be the same length.") + } + + if orderMatters { + verifyIteratorInOrder(t, actualResultsItr, expectedKeys, expectedValues, expectedTags, checkTotalItemsCount, + expectedTotalItemsCount) + } else { + verifyIteratorAnyOrder(t, actualResultsItr, expectedKeys, expectedValues, expectedTags, checkTotalItemsCount, + expectedTotalItemsCount) + } +} + +func verifyIteratorAnyOrder(t *testing.T, actualResultsItr storage.Iterator, //nolint: gocyclo,funlen // Test file + expectedKeys []string, expectedValues [][]byte, expectedTags [][]storage.Tag, + checkTotalItemsCount bool, expectedTotalItemsCount int) { + var dataChecklist struct { + keys []string + values [][]byte + tags [][]storage.Tag + received []bool + } + + dataChecklist.keys = expectedKeys + dataChecklist.values = expectedValues + dataChecklist.tags = expectedTags + dataChecklist.received = make([]bool, len(expectedKeys)) + + moreResultsToCheck, err := actualResultsItr.Next() + require.NoError(t, err) + + if !moreResultsToCheck && len(expectedKeys) != 0 { + require.FailNow(t, "query unexpectedly returned no results") + } + + for moreResultsToCheck { + dataReceivedCount := 0 + + for _, received := range dataChecklist.received { + if received { + dataReceivedCount++ + } + } + + if dataReceivedCount == len(dataChecklist.received) { + require.FailNow(t, "iterator contains more results than expected") + } + + 
var itrErr error + receivedKey, itrErr := actualResultsItr.Key() + require.NoError(t, itrErr) + + receivedValue, itrErr := actualResultsItr.Value() + require.NoError(t, itrErr) + + receivedTags, itrErr := actualResultsItr.Tags() + require.NoError(t, itrErr) + + for i := 0; i < len(dataChecklist.keys); i++ { + if receivedKey == dataChecklist.keys[i] { + if string(receivedValue) == string(dataChecklist.values[i]) { + if equalTags(receivedTags, dataChecklist.tags[i]) { + dataChecklist.received[i] = true + + break + } + } + } + } + + moreResultsToCheck, err = actualResultsItr.Next() + require.NoError(t, err) + } + + if checkTotalItemsCount { + count, errTotalItems := actualResultsItr.TotalItems() + require.NoError(t, errTotalItems) + require.Equal(t, expectedTotalItemsCount, count) + } + + err = actualResultsItr.Close() + require.NoError(t, err) + + for _, received := range dataChecklist.received { + if !received { + require.FailNow(t, "received unexpected query results") + } + } +} + +func verifyIteratorInOrder(t *testing.T, actualResultsItr storage.Iterator, + expectedKeys []string, expectedValues [][]byte, expectedTags [][]storage.Tag, + checkTotalItemsCount bool, expectedTotalItemsCount int) { + moreResultsToCheck, err := actualResultsItr.Next() + require.NoError(t, err) + + if !moreResultsToCheck && len(expectedKeys) != 0 { + require.FailNow(t, "query unexpectedly returned no results") + } + + var currentIndex int + + for moreResultsToCheck { + var itrErr error + receivedKey, itrErr := actualResultsItr.Key() + require.NoError(t, itrErr) + require.Equal(t, expectedKeys[currentIndex], receivedKey) + + receivedValue, itrErr := actualResultsItr.Value() + require.NoError(t, itrErr) + require.Equal(t, string(expectedValues[currentIndex]), string(receivedValue)) + + receivedTags, itrErr := actualResultsItr.Tags() + require.NoError(t, itrErr) + require.True(t, equalTags(receivedTags, expectedTags[currentIndex]), + "received unexpected query results") + + 
moreResultsToCheck, err = actualResultsItr.Next() + require.NoError(t, err) + + if moreResultsToCheck { + currentIndex++ + + if currentIndex+1 > len(expectedKeys) { + require.FailNow(t, "query returned too many results") + } + } + } + + if checkTotalItemsCount { + count, errTotalItems := actualResultsItr.TotalItems() + require.NoError(t, errTotalItems) + require.Equal(t, expectedTotalItemsCount, count) + } + + err = actualResultsItr.Close() + require.NoError(t, err) +} + +func equalTags(tags1, tags2 []storage.Tag) bool { //nolint:gocyclo // Test file + if len(tags1) != len(tags2) { + return false + } + + matchedTags1 := make([]bool, len(tags1)) + matchedTags2 := make([]bool, len(tags2)) + + for i, tag1 := range tags1 { + for j, tag2 := range tags2 { + if matchedTags2[j] { + continue // This tag has already found a match. Tags can only have one match! + } + + if tag1.Name == tag2.Name && tag1.Value == tag2.Value { + matchedTags1[i] = true + matchedTags2[j] = true + + break + } + } + + if !matchedTags1[i] { + return false + } + } + + for _, matchedTag := range matchedTags1 { + if !matchedTag { + return false + } + } + + for _, matchedTag := range matchedTags2 { + if !matchedTag { + return false + } + } + + return true +} + +func equalTagNamesAnyOrder(tagNames1, tagNames2 []string) bool { + areTagNamesMatchedFromSlice1 := make([]bool, len(tagNames1)) + areTagNamesMatchedFromSlice2 := make([]bool, len(tagNames2)) + + for i, tagName1 := range tagNames1 { + for j, tagName2 := range tagNames2 { + if areTagNamesMatchedFromSlice2[j] { + continue // This tag name has already found a match. Tag names can only have one match! 
+ } + + if tagName1 == tagName2 { + areTagNamesMatchedFromSlice1[i] = true + areTagNamesMatchedFromSlice2[j] = true + + break + } + } + + if !areTagNamesMatchedFromSlice1[i] { + return false + } + } + + for _, isTagNameMatch := range areTagNamesMatchedFromSlice1 { + if !isTagNameMatch { + return false + } + } + + for _, isTagNameMatch := range areTagNamesMatchedFromSlice2 { + if !isTagNameMatch { + return false + } + } + + return true +} diff --git a/component/wallet-cli/pkg/walletrunner/wallet_runner.go b/component/wallet-cli/pkg/walletrunner/wallet_runner.go index fe45155ea..821e01a4e 100644 --- a/component/wallet-cli/pkg/walletrunner/wallet_runner.go +++ b/component/wallet-cli/pkg/walletrunner/wallet_runner.go @@ -16,7 +16,6 @@ import ( "time" "github.com/henvic/httpretty" - "github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb" "github.com/hyperledger/aries-framework-go-ext/component/vdr/jwk" "github.com/hyperledger/aries-framework-go-ext/component/vdr/longform" "github.com/hyperledger/aries-framework-go-ext/component/vdr/orb" @@ -40,6 +39,7 @@ import ( "github.com/hyperledger/aries-framework-go/spi/secretlock" "github.com/hyperledger/aries-framework-go/spi/storage" jsonld "github.com/piprate/json-gold/ld" + "github.com/trustbloc/vcs/component/wallet-cli/internal/storage/mongodb" "golang.org/x/oauth2" "github.com/trustbloc/vcs/component/wallet-cli/pkg/walletrunner/vcprovider" @@ -188,7 +188,7 @@ func (s *Service) createAgentServices(vcProviderConf *vcprovider.Config) (*aries var storageProvider storage.Provider switch strings.ToLower(s.vcProviderConf.StorageProvider) { case "mongodb": - p, err := mongodb.NewProvider(s.vcProviderConf.StorageProviderConnString) + p, err := mongodb.NewProvider(s.vcProviderConf.StorageProviderConnString, nil) if err != nil { return nil, err } diff --git a/go.mod b/go.mod index 11bc998fb..eb5041121 100644 --- a/go.mod +++ b/go.mod @@ -21,8 +21,6 @@ require ( github.com/golang/mock v1.6.0 github.com/google/tink/go 
v1.7.0 github.com/google/uuid v1.3.0 - github.com/hyperledger/aries-framework-go v0.3.3-0.20230828151543-984699876d28 - github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb v0.0.0-20220728172020-0a8903e45149 github.com/hyperledger/aries-framework-go-ext/component/vdr/orb v1.0.0-rc5.0.20221201213446-c4c1e76daa49 github.com/hyperledger/aries-framework-go/component/didconfig v0.0.0-20230622211121-852ce35730b4 github.com/hyperledger/aries-framework-go/component/kmscrypto v0.0.0-20230822161205-69119012ed5c @@ -126,6 +124,8 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hyperledger/aries-framework-go v0.3.3-0.20230828151543-984699876d28 // indirect + github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb v0.0.0-20220728172020-0a8903e45149 // indirect github.com/hyperledger/aries-framework-go-ext/component/vdr/sidetree v1.0.0-rc3.0.20221104150937-07bfbe450122 // indirect github.com/hyperledger/aries-framework-go/component/log v0.0.0-20230427134832-0c9969493bd3 // indirect github.com/hyperledger/fabric-amcl v0.0.0-20230602173724-9e02669dceb2 // indirect diff --git a/pkg/kms/arieskms.go b/pkg/kms/arieskms.go index e2aeabaf9..9cb412116 100644 --- a/pkg/kms/arieskms.go +++ b/pkg/kms/arieskms.go @@ -15,7 +15,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/kms" - "github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb" "github.com/hyperledger/aries-framework-go/component/kmscrypto/crypto/tinkcrypto" webcrypto "github.com/hyperledger/aries-framework-go/component/kmscrypto/crypto/webkms" "github.com/hyperledger/aries-framework-go/component/kmscrypto/doc/jose/jwk" @@ -26,8 +25,9 @@ import ( "github.com/hyperledger/aries-framework-go/component/storageutil/mem" kmsapi "github.com/hyperledger/aries-framework-go/spi/kms" 
"github.com/hyperledger/aries-framework-go/spi/secretlock" - "github.com/hyperledger/aries-framework-go/spi/storage" awssvc "github.com/trustbloc/kms/pkg/aws" + "github.com/trustbloc/vcs/pkg/storage/mongodb" + "github.com/trustbloc/vcs/pkg/storage/mongodb/arieskmsstore" "github.com/trustbloc/vcs/pkg/doc/vc" vcsverifiable "github.com/trustbloc/vcs/pkg/doc/verifiable" @@ -152,12 +152,7 @@ func createLocalKMS(cfg *Config) (keyManager, Crypto, error) { return nil, nil, err } - storeProvider, err := createStoreProvider(cfg.DBType, cfg.DBURL, cfg.DBPrefix) - if err != nil { - return nil, nil, err - } - - kmsStore, err := arieskms.NewAriesProviderWrapper(storeProvider) + kmsStore, err := createStore(cfg.DBType, cfg.DBURL, cfg.DBPrefix) if err != nil { return nil, nil, err } @@ -223,29 +218,20 @@ func createLocalSecretLock(keyPath string) (secretlock.Service, error) { return secretLock, nil } -func createStoreProvider(typ, url, prefix string) (storage.Provider, error) { - var createProvider func(url, prefix string) (storage.Provider, error) - +func createStore(typ, url, prefix string) (kmsapi.Store, error) { switch { case strings.EqualFold(typ, storageTypeMemOption): - createProvider = func(string, string) (storage.Provider, error) { //nolint:unparam - return mem.NewProvider(), nil - } - + return arieskms.NewAriesProviderWrapper(mem.NewProvider()) case strings.EqualFold(typ, storageTypeMongoDBOption): - createProvider = func(url, prefix string) (storage.Provider, error) { - mongoDBProvider, err := mongodb.NewProvider(url, mongodb.WithDBPrefix(prefix)) - if err != nil { - return nil, err - } - - return mongoDBProvider, nil + mongoClient, err := mongodb.New(url, prefix) + if err != nil { + return nil, err } + + return arieskmsstore.NewStore(mongoClient), nil default: return nil, fmt.Errorf("not supported database type: %s", typ) } - - return createProvider(url, prefix) } type kmsProvider struct { diff --git a/pkg/kms/arieskms_test.go b/pkg/kms/arieskms_test.go index 
a5333ec27..9565e00b5 100644 --- a/pkg/kms/arieskms_test.go +++ b/pkg/kms/arieskms_test.go @@ -91,6 +91,19 @@ func TestNewLocalKeyManager(t *testing.T) { require.NoError(t, err) }) + t.Run("Fail mongodb", func(t *testing.T) { + km, err := kms.NewAriesKeyManager(&kms.Config{ + KMSType: kms.Local, + SecretLockKeyPath: secretLockKeyFile, + DBType: "mongodb", + DBURL: "not a url!", + DBPrefix: "test", + }, nil) + + require.Nil(t, km) + require.Contains(t, err.Error(), "failed to create a new MongoDB client") + }) + t.Run("Incorrect SecretLockKeyPath", func(t *testing.T) { _, err := kms.NewAriesKeyManager(&kms.Config{ KMSType: kms.Local, diff --git a/pkg/storage/mongodb/arieskmsstore/aries_kms_store.go b/pkg/storage/mongodb/arieskmsstore/aries_kms_store.go new file mode 100644 index 000000000..d64edbd2d --- /dev/null +++ b/pkg/storage/mongodb/arieskmsstore/aries_kms_store.go @@ -0,0 +1,95 @@ +/* +Copyright Gen Digital Inc. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package arieskmsstore + +import ( + "errors" + "fmt" + + arieskms "github.com/hyperledger/aries-framework-go/component/kmscrypto/kms" + "github.com/trustbloc/vcs/pkg/storage/mongodb" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +// Store provides local KMS storage using mongodb. +type Store struct { + client *mongodb.Client +} + +const ( + ariesKMSStoreName = "aries_kms_store" +) + +// NewStore initializes a Store. +func NewStore(mongoClient *mongodb.Client) *Store { + return &Store{client: mongoClient} +} + +type dataWrapper struct { + ID string `bson:"_id"` + Bin []byte `bson:"bin,omitempty"` +} + +// Put stores the given key under the given keysetID. Overwrites silently. 
+func (s *Store) Put(keysetID string, key []byte) error { + coll := s.client.Database().Collection(ariesKMSStoreName) + + ctx, cancel := s.client.ContextWithTimeout() + defer cancel() + + _, err := coll.UpdateByID(ctx, keysetID, &dataWrapper{ + ID: keysetID, + Bin: key, + }, options.Update().SetUpsert(true)) + if err != nil { + return err + } + + return nil +} + +// Get retrieves the key stored under the given keysetID. If no key is found, +// the returned error is expected to wrap ErrKeyNotFound. KMS implementations +// may check to see if the error wraps that error type for certain operations. +func (s *Store) Get(keysetID string) ([]byte, error) { + coll := s.client.Database().Collection(ariesKMSStoreName) + + ctx, cancel := s.client.ContextWithTimeout() + defer cancel() + + result := &dataWrapper{} + + err := coll.FindOne(ctx, bson.M{"_id": keysetID}).Decode(result) + if errors.Is(err, mongo.ErrNoDocuments) { + return nil, fmt.Errorf("%w. Underlying error: %s", + arieskms.ErrKeyNotFound, err.Error()) + } + + if err != nil { + return nil, err + } + + return result.Bin, nil +} + +// Delete deletes the key stored under the given keysetID. A KeyManager will +// assume that attempting to delete a non-existent key will not return an error. 
+func (s *Store) Delete(keysetID string) error { + coll := s.client.Database().Collection(ariesKMSStoreName) + + ctx, cancel := s.client.ContextWithTimeout() + defer cancel() + + _, err := coll.DeleteOne(ctx, bson.M{"_id": keysetID}) + if err != nil { + return fmt.Errorf("failed to run DeleteOne command in MongoDB: %w", err) + } + + return nil +} diff --git a/pkg/storage/mongodb/cslindexstore/csl_index_store.go b/pkg/storage/mongodb/cslindexstore/csl_index_store.go index 4cee30986..6cb65c330 100644 --- a/pkg/storage/mongodb/cslindexstore/csl_index_store.go +++ b/pkg/storage/mongodb/cslindexstore/csl_index_store.go @@ -12,7 +12,7 @@ import ( "fmt" "github.com/google/uuid" - mongodbext "github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb" + "github.com/trustbloc/vcs/pkg/storage/mongodb/internal" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" @@ -45,7 +45,7 @@ func NewStore(mongoClient *mongodb.Client) *Store { // Upsert does upsert operation of cslWrapper against underlying MongoDB. 
func (p *Store) Upsert(ctx context.Context, cslURL string, cslWrapper *credentialstatus.CSLIndexWrapper) error { - mongoDBDocument, err := mongodbext.PrepareDataForBSONStorage(cslWrapper) + mongoDBDocument, err := internal.PrepareDataForBSONStorage(cslWrapper) if err != nil { return err } diff --git a/pkg/storage/mongodb/cslvcstore/csl_vc_store.go b/pkg/storage/mongodb/cslvcstore/csl_vc_store.go index 25cb05fcc..eb7bb6d90 100644 --- a/pkg/storage/mongodb/cslvcstore/csl_vc_store.go +++ b/pkg/storage/mongodb/cslvcstore/csl_vc_store.go @@ -12,7 +12,7 @@ import ( "fmt" "net/url" - mongodbext "github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb" + "github.com/trustbloc/vcs/pkg/storage/mongodb/internal" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" @@ -40,7 +40,7 @@ func NewStore(mongoClient *mongodb.Client) *Store { // Upsert does upsert operation of cslWrapper against underlying MongoDB. func (p *Store) Upsert(ctx context.Context, cslURL string, wrapper *credentialstatus.CSLVCWrapper) error { - mongoDBDocument, err := mongodbext.PrepareDataForBSONStorage(wrapper) + mongoDBDocument, err := internal.PrepareDataForBSONStorage(wrapper) if err != nil { return fmt.Errorf("failed to prepare data for BSON storage: %w", err) } diff --git a/pkg/storage/mongodb/internal/util.go b/pkg/storage/mongodb/internal/util.go new file mode 100644 index 000000000..3d50caf01 --- /dev/null +++ b/pkg/storage/mongodb/internal/util.go @@ -0,0 +1,119 @@ +/* +Copyright Gen Digital Inc. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package internal + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +// PrepareDataForBSONStorage takes the given value and converts it to the type expected by the MongoDB driver for +// inserting documents. The value must be a struct with exported fields and proper json tags or a map. 
To use the +// MongoDB primary key (_id), you must have an _id field in either the struct or map. Alternatively, add it to the +// map returned by this function. If no _id field is set, then MongoDB will generate one for you. +func PrepareDataForBSONStorage(value interface{}) (map[string]interface{}, error) { + valueBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + return convertMarshalledValueToMap(valueBytes) +} + +func convertMarshalledValueToMap(valueBytes []byte) (map[string]interface{}, error) { + var unmarshalledValue map[string]interface{} + + jsonDecoder := json.NewDecoder(bytes.NewReader(valueBytes)) + jsonDecoder.UseNumber() + + err := jsonDecoder.Decode(&unmarshalledValue) + if err != nil { + return nil, err + } + + escapedMap, err := escapeMapForDocumentDB(unmarshalledValue) + if err != nil { + return nil, err + } + + return escapedMap, nil +} + +// escapeMapForDocumentDB recursively travels through the given map and ensures that all keys are safe for DocumentDB. +// All "." characters in keys are replaced with "`" characters. +// If any "`" characters are discovered in keys then an error is returned, since this would cause confusion with the +// scheme described above. +func escapeMapForDocumentDB(unescapedMap map[string]interface{}) (map[string]interface{}, error) { + escapedMap := make(map[string]interface{}) + + for unescapedKey, unescapedValue := range unescapedMap { + escapedKey, escapedValue, err := escapeKeyValuePair(unescapedKey, unescapedValue) + if err != nil { + return nil, err + } + + escapedMap[escapedKey] = escapedValue + } + + return escapedMap, nil +} + +func escapeKeyValuePair(unescapedKey string, unescapedValue interface{}) (string, interface{}, + error) { + if strings.Contains(unescapedKey, "`") { + return "", nil, + fmt.Errorf(`JSON keys cannot have "`+"`"+`" characters within them. 
Invalid key: %s`, unescapedKey) + } + + escapedValue, err := escapeValue(unescapedValue) + if err != nil { + return "", nil, err + } + + return escapeKey(unescapedKey), escapedValue, nil +} + +func escapeKey(unescapedKey string) string { + return strings.ReplaceAll(unescapedKey, ".", "`") +} + +func escapeValue(unescapedValue interface{}) (interface{}, error) { + unescapedValueAsArray, ok := unescapedValue.([]interface{}) + if ok { + return escapeArray(unescapedValueAsArray) + } + + unescapedValueAsMap, ok := unescapedValue.(map[string]interface{}) + if ok { + escapedValue, err := escapeMapForDocumentDB(unescapedValueAsMap) + if err != nil { + return nil, err + } + + return escapedValue, nil + } + + // In this case, the value is not a nested object or array and so doesn't need escaping. + return unescapedValue, nil +} + +func escapeArray(unescapedArray []interface{}) (interface{}, error) { + escapedArray := make([]interface{}, len(unescapedArray)) + + for i, unescapedValueInUnescapedArray := range unescapedArray { + escapedValue, err := escapeValue(unescapedValueInUnescapedArray) + if err != nil { + return nil, err + } + + escapedArray[i] = escapedValue + } + + return escapedArray, nil +} diff --git a/pkg/storage/mongodb/vcstatusstore/vc_status_store.go b/pkg/storage/mongodb/vcstatusstore/vc_status_store.go index 213ef339a..a5a2440ec 100644 --- a/pkg/storage/mongodb/vcstatusstore/vc_status_store.go +++ b/pkg/storage/mongodb/vcstatusstore/vc_status_store.go @@ -11,9 +11,9 @@ import ( "encoding/json" "fmt" - mongodbext "github.com/hyperledger/aries-framework-go-ext/component/storage/mongodb" "github.com/hyperledger/aries-framework-go/component/models/verifiable" "github.com/trustbloc/vcs/pkg/storage/mongodb" + "github.com/trustbloc/vcs/pkg/storage/mongodb/internal" "go.mongodb.org/mongo-driver/bson" ) @@ -50,7 +50,7 @@ func (p *Store) Put( TypedID: typedID, } - mongoDBDocument, err := mongodbext.PrepareDataForBSONStorage(document) + mongoDBDocument, err := 
internal.PrepareDataForBSONStorage(document) if err != nil { return err } diff --git a/test/bdd/go.mod b/test/bdd/go.mod index 64053acad..2c2f63439 100644 --- a/test/bdd/go.mod +++ b/test/bdd/go.mod @@ -73,6 +73,8 @@ require ( github.com/getkin/kin-openapi v0.94.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-jose/go-jose/v3 v3.0.1-0.20221117193127-916db76e8214 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/gofrs/uuid v4.3.0+incompatible // indirect @@ -179,6 +181,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect go.mongodb.org/mongo-driver v1.11.4 // indirect + go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.9.0 // indirect diff --git a/test/bdd/go.sum b/test/bdd/go.sum index 2f2d656e1..e61d10ece 100644 --- a/test/bdd/go.sum +++ b/test/bdd/go.sum @@ -365,6 +365,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -1191,6 +1192,8 @@ go.opencensus.io v0.22.6/go.mod 
h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 h1:hATJDiGtTPWglqQRlWUiT5df32bOu9AJV41djhfF4Ig= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0/go.mod h1:nkEFz9FW/KZC65rsd8yrHm4aBKa5STMpe4/Xb5+LG64= go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY= diff --git a/test/stress/go.mod b/test/stress/go.mod index ac8a9383c..3efa95155 100644 --- a/test/stress/go.mod +++ b/test/stress/go.mod @@ -64,6 +64,8 @@ require ( github.com/getkin/kin-openapi v0.94.0 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-jose/go-jose/v3 v3.0.1-0.20221117193127-916db76e8214 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect @@ -180,6 +182,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a // indirect go.mongodb.org/mongo-driver v1.11.4 // indirect + go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 // indirect go.opentelemetry.io/otel v1.14.0 // indirect go.opentelemetry.io/otel/trace v1.14.0 // indirect go.uber.org/atomic v1.9.0 // indirect diff --git a/test/stress/go.sum b/test/stress/go.sum index ca781dd74..2031d2cba 100644 
--- a/test/stress/go.sum +++ b/test/stress/go.sum @@ -358,6 +358,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -1175,6 +1176,8 @@ go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0 h1:hATJDiGtTPWglqQRlWUiT5df32bOu9AJV41djhfF4Ig= +go.opentelemetry.io/contrib/instrumentation/go.mongodb.org/mongo-driver/mongo/otelmongo v0.40.0/go.mod h1:nkEFz9FW/KZC65rsd8yrHm4aBKa5STMpe4/Xb5+LG64= go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=