diff --git a/src/commands/cmd_hll.cc b/src/commands/cmd_hll.cc
index 6cde3e61b71..8a9152572a2 100644
--- a/src/commands/cmd_hll.cc
+++ b/src/commands/cmd_hll.cc
@@ -41,7 +41,8 @@ class CommandPfAdd final : public Commander {
       hashes[i - 2] = redis::HyperLogLog::HllHash(args_[i]);
     }
     uint64_t ret{};
-    auto s = hll.Add(args_[1], hashes, &ret);
+    engine::Context ctx(srv->storage);
+    auto s = hll.Add(ctx, args_[1], hashes, &ret);
     if (!s.ok() && !s.IsNotFound()) {
       return {Status::RedisExecErr, s.ToString()};
     }
@@ -61,11 +62,12 @@ class CommandPfCount final : public Commander {
     rocksdb::Status s;
     // The first argument is the command name, so we need to skip it.
     DCHECK_GE(args_.size(), 2);
+    engine::Context ctx(srv->storage);
     if (args_.size() > 2) {
       std::vector<Slice> keys(args_.begin() + 1, args_.end());
-      s = hll.CountMultiple(keys, &ret);
+      s = hll.CountMultiple(ctx, keys, &ret);
     } else {
-      s = hll.Count(args_[1], &ret);
+      s = hll.Count(ctx, args_[1], &ret);
     }
     if (!s.ok() && !s.IsNotFound()) {
       return {Status::RedisExecErr, s.ToString()};
@@ -86,7 +88,8 @@ class CommandPfMerge final : public Commander {
     redis::HyperLogLog hll(srv->storage, conn->GetNamespace());
     DCHECK_GT(args_.size(), 1);
     std::vector<Slice> src_user_keys(args_.begin() + 2, args_.end());
-    auto s = hll.Merge(/*dest_user_key=*/args_[1], src_user_keys);
+    engine::Context ctx(srv->storage);
+    auto s = hll.Merge(ctx, /*dest_user_key=*/args_[1], src_user_keys);
     if (!s.ok() && !s.IsNotFound()) {
       return {Status::RedisExecErr, s.ToString()};
     }
diff --git a/src/storage/redis_db.h b/src/storage/redis_db.h
index 088dc38f2f0..7111fed1099 100644
--- a/src/storage/redis_db.h
+++ b/src/storage/redis_db.h
@@ -74,15 +74,6 @@ class Database {
  public:
   static constexpr uint64_t RANDOM_KEY_SCAN_LIMIT = 60;
 
-  struct GetOptions {
-    // If snapshot is not nullptr, read from the specified snapshot,
-    // otherwise read from the "latest" snapshot.
-    const rocksdb::Snapshot *snapshot = nullptr;
-
-    GetOptions() = default;
-    explicit GetOptions(const rocksdb::Snapshot *ss) : snapshot(ss) {}
-  };
-
   explicit Database(engine::Storage *storage, std::string ns = "");
   /// Parsing metadata with type of `types` from bytes, the metadata is a base class of all metadata.
   /// When parsing, the bytes will be consumed.
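Note for context (illustrative, not part of the patch): the `Database::GetOptions` struct removed above existed so that read helpers could be pinned to an explicit rocksdb snapshot. This change replaces that plumbing with an `engine::Context` that each command constructs once and passes down; the per-call read options come from the context. A minimal sketch of the two conventions, using only calls that appear elsewhere in this diff:

  // Old convention (removed from redis_hyperloglog.cc below): pin a snapshot and
  // thread it through every read helper by hand.
  //   LatestSnapShot ss(storage_);
  //   Database::GetOptions get_options(ss.GetSnapShot());
  //   s = getRegisters(get_options, ns_key, &registers);
  //
  // New convention: build one engine::Context per command and pass it through;
  // point reads pass ctx.GetReadOptions(), multi-gets pass ctx.DefaultMultiGetOptions().
  //   engine::Context ctx(srv->storage);
  //   s = getRegisters(ctx, ns_key, &registers);
  //   storage_->MultiGet(ctx, ctx.DefaultMultiGetOptions(), /*...*/);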
diff --git a/src/types/redis_hyperloglog.cc b/src/types/redis_hyperloglog.cc
index e6c0e420323..9b9b5a50125 100644
--- a/src/types/redis_hyperloglog.cc
+++ b/src/types/redis_hyperloglog.cc
@@ -97,9 +97,8 @@ class HllSegmentCache {
   }
 };
 
-rocksdb::Status HyperLogLog::GetMetadata(Database::GetOptions get_options, const Slice &ns_key,
-                                         HyperLogLogMetadata *metadata) {
-  return Database::GetMetadata(get_options, {kRedisHyperLogLog}, ns_key, metadata);
+rocksdb::Status HyperLogLog::GetMetadata(engine::Context &ctx, const Slice &ns_key, HyperLogLogMetadata *metadata) {
+  return Database::GetMetadata(ctx, {kRedisHyperLogLog}, ns_key, metadata);
 }
 
 uint64_t HyperLogLog::HllHash(std::string_view element) {
@@ -108,13 +107,14 @@ uint64_t HyperLogLog::HllHash(std::string_view element) {
 }
 
 /* the max 0 pattern counter of the subset the element belongs to is incremented if needed */
-rocksdb::Status HyperLogLog::Add(const Slice &user_key, const std::vector<uint64_t> &element_hashes, uint64_t *ret) {
+rocksdb::Status HyperLogLog::Add(engine::Context &ctx, const Slice &user_key,
+                                 const std::vector<uint64_t> &element_hashes, uint64_t *ret) {
   *ret = 0;
   std::string ns_key = AppendNamespacePrefix(user_key);
 
   LockGuard guard(storage_->GetLockManager(), ns_key);
   HyperLogLogMetadata metadata{};
-  rocksdb::Status s = GetMetadata(GetOptions(), ns_key, &metadata);
+  rocksdb::Status s = GetMetadata(ctx, ns_key, &metadata);
   if (!s.ok() && !s.IsNotFound()) {
     return s;
   }
@@ -131,11 +131,11 @@ rocksdb::Status HyperLogLog::Add(const Slice &user_key, const std::vector
-        [this, &ns_key, &metadata](uint32_t segment_index, std::string *segment) -> rocksdb::Status {
+        [this, &ns_key, &metadata, &ctx](uint32_t segment_index, std::string *segment) -> rocksdb::Status {
          std::string sub_key =
              InternalKey(ns_key, std::to_string(segment_index), metadata.version, storage_->IsSlotIdEncoded())
                  .Encode();
-          return storage_->Get(rocksdb::ReadOptions(), sub_key, segment);
+          return storage_->Get(ctx, ctx.GetReadOptions(), sub_key, segment);
         },
         &entry);
     if (!s.ok()) return s;
@@ -172,20 +172,16 @@ rocksdb::Status HyperLogLog::Add(const Slice &user_key, const std::vector
 
     batch->Put(metadata_cf_handle_, ns_key, bytes);
   }
-  return storage_->Write(storage_->DefaultWriteOptions(), batch->GetWriteBatch());
+  return storage_->Write(ctx, storage_->DefaultWriteOptions(), batch->GetWriteBatch());
 }
 
-rocksdb::Status HyperLogLog::Count(const Slice &user_key, uint64_t *ret) {
+rocksdb::Status HyperLogLog::Count(engine::Context &ctx, const Slice &user_key, uint64_t *ret) {
   std::string ns_key = AppendNamespacePrefix(user_key);
   *ret = 0;
   std::vector<std::string> registers;
-  {
-    LatestSnapShot ss(storage_);
-    Database::GetOptions get_options(ss.GetSnapShot());
-    auto s = getRegisters(get_options, ns_key, &registers);
-    if (!s.ok()) {
-      return s;
-    }
+  auto s = getRegisters(ctx, ns_key, &registers);
+  if (!s.ok()) {
+    return s;
   }
   DCHECK_EQ(kHyperLogLogSegmentCount, registers.size());
   std::vector<nonstd::span<const uint8_t>> register_segments = TransformToSpan(registers);
@@ -193,12 +189,12 @@ rocksdb::Status HyperLogLog::Count(const Slice &user_key, uint64_t *ret) {
   return rocksdb::Status::OK();
 }
 
-rocksdb::Status HyperLogLog::mergeUserKeys(Database::GetOptions get_options, const std::vector<Slice> &user_keys,
+rocksdb::Status HyperLogLog::mergeUserKeys(engine::Context &ctx, const std::vector<Slice> &user_keys,
                                            std::vector<std::string> *register_segments) {
   DCHECK_GE(user_keys.size(), static_cast<size_t>(1));
 
   std::string first_ns_key = AppendNamespacePrefix(user_keys[0]);
-  rocksdb::Status s = getRegisters(get_options, first_ns_key, register_segments);
+  rocksdb::Status s = getRegisters(ctx, first_ns_key, register_segments);
   if (!s.ok()) return s;
   // The set of keys that have been seen so far
   std::unordered_set<std::string_view> seend_user_keys;
@@ -212,7 +208,7 @@ rocksdb::Status HyperLogLog::mergeUserKeys(Database::GetOptions get_options, con
     }
     std::string source_key = AppendNamespacePrefix(source_user_key);
     std::vector<std::string> source_registers;
-    s = getRegisters(get_options, source_key, &source_registers);
+    s = getRegisters(ctx, source_key, &source_registers);
     if (!s.ok()) return s;
     DCHECK_EQ(kHyperLogLogSegmentCount, source_registers.size());
     DCHECK_EQ(kHyperLogLogSegmentCount, register_segments->size());
@@ -222,20 +218,18 @@ rocksdb::Status HyperLogLog::mergeUserKeys(Database::GetOptions get_options, con
   return rocksdb::Status::OK();
 }
 
-rocksdb::Status HyperLogLog::CountMultiple(const std::vector<Slice> &user_key, uint64_t *ret) {
+rocksdb::Status HyperLogLog::CountMultiple(engine::Context &ctx, const std::vector<Slice> &user_key, uint64_t *ret) {
   DCHECK_GT(user_key.size(), static_cast<size_t>(1));
   std::vector<std::string> register_segments;
-  // Using same snapshot for all get operations
-  LatestSnapShot ss(storage_);
-  Database::GetOptions get_options(ss.GetSnapShot());
-  auto s = mergeUserKeys(get_options, user_key, &register_segments);
+  auto s = mergeUserKeys(ctx, user_key, &register_segments);
   if (!s.ok()) return s;
   std::vector<nonstd::span<const uint8_t>> register_segment_span = TransformToSpan(register_segments);
   *ret = HllDenseEstimate(register_segment_span);
   return rocksdb::Status::OK();
 }
 
-rocksdb::Status HyperLogLog::Merge(const Slice &dest_user_key, const std::vector<Slice> &source_user_keys) {
+rocksdb::Status HyperLogLog::Merge(engine::Context &ctx, const Slice &dest_user_key,
+                                   const std::vector<Slice> &source_user_keys) {
   if (source_user_keys.empty()) {
     return rocksdb::Status::OK();
   }
@@ -244,22 +238,17 @@ rocksdb::Status HyperLogLog::Merge(const Slice &dest_user_key, const std::vector
   LockGuard guard(storage_->GetLockManager(), dest_key);
   std::vector<std::string> registers;
   HyperLogLogMetadata metadata;
+
+  rocksdb::Status s = GetMetadata(ctx, dest_user_key, &metadata);
+  if (!s.ok() && !s.IsNotFound()) return s;
   {
-    // Using same snapshot for all get operations and release it after
-    // finishing the merge operation
-    LatestSnapShot ss(storage_);
-    Database::GetOptions get_options(ss.GetSnapShot());
-    rocksdb::Status s = GetMetadata(get_options, dest_user_key, &metadata);
-    if (!s.ok() && !s.IsNotFound()) return s;
-    {
-      std::vector<Slice> all_user_keys;
-      all_user_keys.reserve(source_user_keys.size() + 1);
-      all_user_keys.push_back(dest_user_key);
-      for (const auto &source_user_key : source_user_keys) {
-        all_user_keys.push_back(source_user_key);
-      }
-      s = mergeUserKeys(get_options, all_user_keys, &registers);
+    std::vector<Slice> all_user_keys;
+    all_user_keys.reserve(source_user_keys.size() + 1);
+    all_user_keys.push_back(dest_user_key);
+    for (const auto &source_user_key : source_user_keys) {
+      all_user_keys.push_back(source_user_key);
     }
+    s = mergeUserKeys(ctx, all_user_keys, &registers);
   }
 
   auto batch = storage_->GetWriteBatchBase();
@@ -283,13 +272,13 @@ rocksdb::Status HyperLogLog::Merge(const Slice &dest_user_key, const std::vector
 
     batch->Put(metadata_cf_handle_, dest_key, bytes);
   }
-  return storage_->Write(storage_->DefaultWriteOptions(), batch->GetWriteBatch());
+  return storage_->Write(ctx, storage_->DefaultWriteOptions(), batch->GetWriteBatch());
 }
 
-rocksdb::Status HyperLogLog::getRegisters(Database::GetOptions get_options, const Slice &ns_key,
+rocksdb::Status HyperLogLog::getRegisters(engine::Context &ctx, const Slice &ns_key,
                                           std::vector<rocksdb::PinnableSlice> *register_segments) {
   HyperLogLogMetadata metadata;
-  rocksdb::Status s = GetMetadata(get_options, ns_key, &metadata);
+  rocksdb::Status s = GetMetadata(ctx, ns_key, &metadata);
   if (!s.ok()) {
     if (s.IsNotFound()) {
       // return empty registers with the right size.
@@ -299,8 +288,6 @@ rocksdb::Status HyperLogLog::getRegisters(Database::GetOptions get_options, cons
     return s;
   }
 
-  rocksdb::ReadOptions read_options = storage_->DefaultMultiGetOptions();
-  read_options.snapshot = get_options.snapshot;
   // Multi get all segments
   std::vector<std::string> sub_segment_keys;
   sub_segment_keys.reserve(kHyperLogLogSegmentCount);
@@ -316,8 +303,8 @@ rocksdb::Status HyperLogLog::getRegisters(Database::GetOptions get_options, cons
   }
   register_segments->resize(kHyperLogLogSegmentCount);
   std::vector<rocksdb::Status> statuses(kHyperLogLogSegmentCount);
-  storage_->MultiGet(read_options, storage_->GetDB()->DefaultColumnFamily(), kHyperLogLogSegmentCount,
-                     sub_segment_slices.data(), register_segments->data(), statuses.data());
+  storage_->MultiGet(ctx, ctx.DefaultMultiGetOptions(), storage_->GetDB()->DefaultColumnFamily(),
+                     kHyperLogLogSegmentCount, sub_segment_slices.data(), register_segments->data(), statuses.data());
   for (size_t i = 0; i < kHyperLogLogSegmentCount; i++) {
     if (!statuses[i].ok() && !statuses[i].IsNotFound()) {
       register_segments->at(i).clear();
@@ -327,10 +314,10 @@ rocksdb::Status HyperLogLog::getRegisters(Database::GetOptions get_options, cons
   return rocksdb::Status::OK();
 }
 
-rocksdb::Status HyperLogLog::getRegisters(Database::GetOptions get_options, const Slice &ns_key,
+rocksdb::Status HyperLogLog::getRegisters(engine::Context &ctx, const Slice &ns_key,
                                           std::vector<std::string> *register_segments) {
   std::vector<rocksdb::PinnableSlice> pinnable_slices;
-  rocksdb::Status s = getRegisters(get_options, ns_key, &pinnable_slices);
+  rocksdb::Status s = getRegisters(ctx, ns_key, &pinnable_slices);
   if (!s.ok()) return s;
   register_segments->reserve(kHyperLogLogSegmentCount);
   for (auto &pinnable_slice : pinnable_slices) {
diff --git a/src/types/redis_hyperloglog.h b/src/types/redis_hyperloglog.h
index 6b2e441b668..cf09f4445fb 100644
--- a/src/types/redis_hyperloglog.h
+++ b/src/types/redis_hyperloglog.h
@@ -28,27 +28,27 @@ namespace redis {
 class HyperLogLog : public Database {
  public:
   explicit HyperLogLog(engine::Storage *storage, const std::string &ns) : Database(storage, ns) {}
-  rocksdb::Status Add(const Slice &user_key, const std::vector<uint64_t> &element_hashes, uint64_t *ret);
-  rocksdb::Status Count(const Slice &user_key, uint64_t *ret);
+  rocksdb::Status Add(engine::Context &ctx, const Slice &user_key, const std::vector<uint64_t> &element_hashes,
+                      uint64_t *ret);
+  rocksdb::Status Count(engine::Context &ctx, const Slice &user_key, uint64_t *ret);
   /// The count when user_keys.size() is greater than 1.
-  rocksdb::Status CountMultiple(const std::vector<Slice> &user_key, uint64_t *ret);
-  rocksdb::Status Merge(const Slice &dest_user_key, const std::vector<Slice> &source_user_keys);
+  rocksdb::Status CountMultiple(engine::Context &ctx, const std::vector<Slice> &user_key, uint64_t *ret);
+  rocksdb::Status Merge(engine::Context &ctx, const Slice &dest_user_key, const std::vector<Slice> &source_user_keys);
 
   static uint64_t HllHash(std::string_view);
 
  private:
-  [[nodiscard]] rocksdb::Status GetMetadata(Database::GetOptions get_options, const Slice &ns_key,
-                                            HyperLogLogMetadata *metadata);
+  [[nodiscard]] rocksdb::Status GetMetadata(engine::Context &ctx, const Slice &ns_key, HyperLogLogMetadata *metadata);
 
-  [[nodiscard]] rocksdb::Status mergeUserKeys(Database::GetOptions get_options, const std::vector<Slice> &user_keys,
+  [[nodiscard]] rocksdb::Status mergeUserKeys(engine::Context &ctx, const std::vector<Slice> &user_keys,
                                               std::vector<std::string> *register_segments);
 
   /// Using multi-get to acquire the register_segments
   ///
   /// If the metadata is not found, register_segments will be initialized with 16 empty slices.
-  [[nodiscard]] rocksdb::Status getRegisters(Database::GetOptions get_options, const Slice &ns_key,
+  [[nodiscard]] rocksdb::Status getRegisters(engine::Context &ctx, const Slice &ns_key,
                                              std::vector<rocksdb::PinnableSlice> *register_segments);
   /// Same with getRegisters, but the result is stored in a vector of strings.
-  [[nodiscard]] rocksdb::Status getRegisters(Database::GetOptions get_options, const Slice &ns_key,
+  [[nodiscard]] rocksdb::Status getRegisters(engine::Context &ctx, const Slice &ns_key,
                                              std::vector<std::string> *register_segments);
 };
diff --git a/tests/cppunit/types/hyperloglog_test.cc b/tests/cppunit/types/hyperloglog_test.cc
index 234b688e8ba..02a52383ac3 100644
--- a/tests/cppunit/types/hyperloglog_test.cc
+++ b/tests/cppunit/types/hyperloglog_test.cc
@@ -34,17 +34,17 @@ class RedisHyperLogLogTest : public TestBase {
 
   void SetUp() override {
     TestBase::SetUp();
-    [[maybe_unused]] auto s = hll_->Del("hll");
+    [[maybe_unused]] auto s = hll_->Del(*ctx_, "hll");
     for (int x = 1; x <= 3; x++) {
-      s = hll_->Del("hll" + std::to_string(x));
+      s = hll_->Del(*ctx_, "hll" + std::to_string(x));
     }
   }
 
   void TearDown() override {
     TestBase::SetUp();
-    [[maybe_unused]] auto s = hll_->Del("hll");
+    [[maybe_unused]] auto s = hll_->Del(*ctx_, "hll");
     for (int x = 1; x <= 3; x++) {
-      s = hll_->Del("hll" + std::to_string(x));
+      s = hll_->Del(*ctx_, "hll" + std::to_string(x));
     }
   }
 
@@ -62,88 +62,88 @@ class RedisHyperLogLogTest : public TestBase {
 
 TEST_F(RedisHyperLogLogTest, PFADD) {
   uint64_t ret = 0;
-  ASSERT_TRUE(hll_->Add("hll", {}, &ret).ok() && ret == 0);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll", {}, &ret).ok() && ret == 0);
   // Approximated cardinality after creation is zero
-  ASSERT_TRUE(hll_->Count("hll", &ret).ok() && ret == 0);
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll", &ret).ok() && ret == 0);
   // PFADD returns 1 when at least 1 reg was modified
-  ASSERT_TRUE(hll_->Add("hll", computeHashes({"a", "b", "c"}), &ret).ok());
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll", computeHashes({"a", "b", "c"}), &ret).ok());
   ASSERT_EQ(1, ret);
-  ASSERT_TRUE(hll_->Count("hll", &ret).ok());
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll", &ret).ok());
   ASSERT_EQ(3, ret);
   // PFADD returns 0 when no reg was modified
-  ASSERT_TRUE(hll_->Add("hll", computeHashes({"a", "b", "c"}), &ret).ok() && ret == 0);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll", computeHashes({"a", "b", "c"}), &ret).ok() && ret == 0);
   // PFADD works with empty string
-  ASSERT_TRUE(hll_->Add("hll", computeHashes({""}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll", computeHashes({""}), &ret).ok() && ret == 1);
   // PFADD works with similar hash, which is likely to be in the same bucket
-  ASSERT_TRUE(hll_->Add("hll", {1, 2, 3, 2, 1}, &ret).ok() && ret == 1);
-  ASSERT_TRUE(hll_->Count("hll", &ret).ok());
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll", {1, 2, 3, 2, 1}, &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll", &ret).ok());
   ASSERT_EQ(7, ret);
 }
 
 TEST_F(RedisHyperLogLogTest, PFCOUNT_returns_approximated_cardinality_of_set) {
   uint64_t ret = 0;
   // pf add "1" to "5"
-  ASSERT_TRUE(hll_->Add("hll", computeHashes({"1", "2", "3", "4", "5"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll", computeHashes({"1", "2", "3", "4", "5"}), &ret).ok() && ret == 1);
   // pf count is 5
-  ASSERT_TRUE(hll_->Count("hll", &ret).ok() && ret == 5);
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll", &ret).ok() && ret == 5);
   // pf add "6" to "10"
-  ASSERT_TRUE(hll_->Add("hll", computeHashes({"6", "7", "8", "8", "9", "10"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll", computeHashes({"6", "7", "8", "8", "9", "10"}), &ret).ok() && ret == 1);
   // pf count is 10
-  ASSERT_TRUE(hll_->Count("hll", &ret).ok() && ret == 10);
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll", &ret).ok() && ret == 10);
 }
 
 TEST_F(RedisHyperLogLogTest, PFMERGE_results_on_the_cardinality_of_union_of_sets) {
   uint64_t ret = 0;
   // pf add hll1 a b c
-  ASSERT_TRUE(hll_->Add("hll1", computeHashes({"a", "b", "c"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll1", computeHashes({"a", "b", "c"}), &ret).ok() && ret == 1);
   // pf add hll2 b c d
-  ASSERT_TRUE(hll_->Add("hll2", computeHashes({"b", "c", "d"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll2", computeHashes({"b", "c", "d"}), &ret).ok() && ret == 1);
   // pf add hll3 c d e
-  ASSERT_TRUE(hll_->Add("hll3", computeHashes({"c", "d", "e"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll3", computeHashes({"c", "d", "e"}), &ret).ok() && ret == 1);
   // pf merge hll hll1 hll2 hll3
-  ASSERT_TRUE(hll_->Merge("hll", {"hll1", "hll2", "hll3"}).ok());
+  ASSERT_TRUE(hll_->Merge(*ctx_, "hll", {"hll1", "hll2", "hll3"}).ok());
   // pf count hll is 5
-  ASSERT_TRUE(hll_->Count("hll", &ret).ok());
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll", &ret).ok());
   ASSERT_EQ(5, ret);
 }
 
 TEST_F(RedisHyperLogLogTest, PFCOUNT_multiple) {
   uint64_t ret = 0;
-  ASSERT_TRUE(hll_->CountMultiple({"hll1", "hll2", "hll3"}, &ret).ok());
+  ASSERT_TRUE(hll_->CountMultiple(*ctx_, {"hll1", "hll2", "hll3"}, &ret).ok());
   ASSERT_EQ(0, ret);
   // pf add hll1 a b c
-  ASSERT_TRUE(hll_->Add("hll1", computeHashes({"a", "b", "c"}), &ret).ok() && ret == 1);
-  ASSERT_TRUE(hll_->Count("hll1", &ret).ok());
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll1", computeHashes({"a", "b", "c"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll1", &ret).ok());
   ASSERT_EQ(3, ret);
-  ASSERT_TRUE(hll_->CountMultiple({"hll1", "hll2", "hll3"}, &ret).ok());
+  ASSERT_TRUE(hll_->CountMultiple(*ctx_, {"hll1", "hll2", "hll3"}, &ret).ok());
   ASSERT_EQ(3, ret);
   // pf add hll2 b c d
-  ASSERT_TRUE(hll_->Add("hll2", computeHashes({"b", "c", "d"}), &ret).ok() && ret == 1);
-  ASSERT_TRUE(hll_->CountMultiple({"hll1", "hll2", "hll3"}, &ret).ok());
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll2", computeHashes({"b", "c", "d"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->CountMultiple(*ctx_, {"hll1", "hll2", "hll3"}, &ret).ok());
   ASSERT_EQ(4, ret);
   // pf add hll3 c d e
-  ASSERT_TRUE(hll_->Add("hll3", computeHashes({"c", "d", "e"}), &ret).ok() && ret == 1);
-  ASSERT_TRUE(hll_->CountMultiple({"hll1", "hll2", "hll3"}, &ret).ok());
+  ASSERT_TRUE(hll_->Add(*ctx_, "hll3", computeHashes({"c", "d", "e"}), &ret).ok() && ret == 1);
+  ASSERT_TRUE(hll_->CountMultiple(*ctx_, {"hll1", "hll2", "hll3"}, &ret).ok());
   ASSERT_EQ(5, ret);
   // pf merge hll hll1 hll2 hll3
-  ASSERT_TRUE(hll_->Merge("hll", {"hll1", "hll2", "hll3"}).ok());
+  ASSERT_TRUE(hll_->Merge(*ctx_, "hll", {"hll1", "hll2", "hll3"}).ok());
   // pf count hll is 5
-  ASSERT_TRUE(hll_->Count("hll", &ret).ok());
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll", &ret).ok());
   ASSERT_EQ(5, ret);
-  ASSERT_TRUE(hll_->CountMultiple({"hll1", "hll2", "hll3", "hll"}, &ret).ok());
+  ASSERT_TRUE(hll_->CountMultiple(*ctx_, {"hll1", "hll2", "hll3", "hll"}, &ret).ok());
   ASSERT_EQ(5, ret);
 }
 
 TEST_F(RedisHyperLogLogTest, PFCOUNT_multiple_keys_merge_returns_cardinality_of_union_1) {
   for (int x = 1; x < 1000; x++) {
     uint64_t ret = 0;
-    ASSERT_TRUE(hll_->Add("hll0", computeHashes({"foo-" + std::to_string(x)}), &ret).ok());
-    ASSERT_TRUE(hll_->Add("hll1", computeHashes({"bar-" + std::to_string(x)}), &ret).ok());
-    ASSERT_TRUE(hll_->Add("hll2", computeHashes({"zap-" + std::to_string(x)}), &ret).ok());
+    ASSERT_TRUE(hll_->Add(*ctx_, "hll0", computeHashes({"foo-" + std::to_string(x)}), &ret).ok());
+    ASSERT_TRUE(hll_->Add(*ctx_, "hll1", computeHashes({"bar-" + std::to_string(x)}), &ret).ok());
+    ASSERT_TRUE(hll_->Add(*ctx_, "hll2", computeHashes({"zap-" + std::to_string(x)}), &ret).ok());
     std::vector<uint64_t> cards(3);
-    ASSERT_TRUE(hll_->Count("hll0", &cards[0]).ok());
-    ASSERT_TRUE(hll_->Count("hll1", &cards[1]).ok());
-    ASSERT_TRUE(hll_->Count("hll2", &cards[2]).ok());
+    ASSERT_TRUE(hll_->Count(*ctx_, "hll0", &cards[0]).ok());
+    ASSERT_TRUE(hll_->Count(*ctx_, "hll1", &cards[1]).ok());
+    ASSERT_TRUE(hll_->Count(*ctx_, "hll2", &cards[2]).ok());
     auto card = static_cast<double>(cards[0] + cards[1] + cards[2]);
     double realcard = x * 3;
     // assert the ABS of 'card' and 'realcart' is within 5% of the cardinality
@@ -160,14 +160,14 @@ TEST_F(RedisHyperLogLogTest, PFCOUNT_multiple_keys_merge_returns_cardinality_of_
     for (auto j = 0; j < 3; j++) {
       uint64_t ret = 0;
       int rint = std::rand() % 20000;
-      ASSERT_TRUE(hll_->Add("hll" + std::to_string(j), computeHashes({std::to_string(rint)}), &ret).ok());
+      ASSERT_TRUE(hll_->Add(*ctx_, "hll" + std::to_string(j), computeHashes({std::to_string(rint)}), &ret).ok());
       realcard_vec.push_back(rint);
     }
   }
   std::vector<uint64_t> cards(3);
-  ASSERT_TRUE(hll_->Count("hll0", &cards[0]).ok());
-  ASSERT_TRUE(hll_->Count("hll1", &cards[1]).ok());
-  ASSERT_TRUE(hll_->Count("hll2", &cards[2]).ok());
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll0", &cards[0]).ok());
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll1", &cards[1]).ok());
+  ASSERT_TRUE(hll_->Count(*ctx_, "hll2", &cards[2]).ok());
   auto card = static_cast<double>(cards[0] + cards[1] + cards[2]);
   auto realcard = static_cast<double>(realcard_vec.size());
   double left = std::abs(card - realcard);
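End-to-end usage sketch (not part of the patch), mirroring the updated tests above: it shows how a caller drives the migrated API with a single engine::Context. The include paths, the helper function name, and the empty namespace argument are assumptions; error handling is trimmed.

#include "storage/storage.h"          // engine::Storage / engine::Context (assumed include path)
#include "types/redis_hyperloglog.h"  // redis::HyperLogLog (assumed include path)

// Hypothetical helper, for illustration only.
void HyperLogLogUsageSketch(engine::Storage *storage) {
  engine::Context ctx(storage);                // one context per logical operation, as in cmd_hll.cc
  redis::HyperLogLog hll(storage, /*ns=*/"");  // namespace handling simplified

  uint64_t ret = 0;
  // PFADD: elements are hashed up front, then added under the shared context.
  auto s = hll.Add(ctx, "hll", {redis::HyperLogLog::HllHash("a")}, &ret);
  if (!s.ok() && !s.IsNotFound()) return;

  // PFCOUNT: single-key and multi-key counting read through the same context,
  // replacing the LatestSnapShot/GetOptions plumbing removed by this patch.
  s = hll.Count(ctx, "hll", &ret);
  s = hll.CountMultiple(ctx, {"hll", "hll1"}, &ret);

  // PFMERGE into a destination key.
  s = hll.Merge(ctx, /*dest_user_key=*/"hll-merged", {"hll", "hll1"});
}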