Skip to content

Commit

Permalink
prevent draining of config.systems.sessions
Browse files Browse the repository at this point in the history
  • Loading branch information
MiaAltieri committed Oct 27, 2023
1 parent 2c54e12 commit eac5377
Show file tree
Hide file tree
Showing 2 changed files with 92 additions and 73 deletions.
14 changes: 14 additions & 0 deletions tests/integration/sharding_tests/helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,17 @@ def get_databases_for_shard(mongos_client, shard_name) -> Optional[List[str]]:
return

return databases_collection.distinct("_id", {"primary": shard_name})


def has_correct_shards(mongos_client, expected_shards: List[str]) -> bool:
    """Check that the cluster's shard set matches the expected shard names.

    Args:
        mongos_client: client connected to mongos, used to query cluster config.
        expected_shards: the shard names the cluster is expected to contain.

    Returns:
        True only when the shards reported by the config server are exactly
        the provided names (order-insensitive), False otherwise.
    """
    expected = set(expected_shards)
    actual = get_cluster_shards(mongos_client)
    return actual == expected


def shard_has_databases(
    mongos_client, shard_name: str, expected_databases_on_shard: List[str]
) -> bool:
    """Returns true if the provided shard is a primary for the provided databases.

    Args:
        mongos_client: client connected to mongos, used to query cluster config.
        shard_name: name of the shard to inspect.
        expected_databases_on_shard: database names expected to have this shard
            as their primary.

    Returns:
        True only when the shard is primary for exactly the expected databases
        (order-insensitive); False otherwise, including when the cluster config
        could not be read.
    """
    databases_on_shard = get_databases_for_shard(mongos_client, shard_name=shard_name)
    # get_databases_for_shard can bail out with a bare `return` (i.e. None);
    # treat that as "expected databases not present" rather than letting
    # set(None) raise TypeError inside a test assertion.
    if databases_on_shard is None:
        return False
    return set(databases_on_shard) == set(expected_databases_on_shard)
151 changes: 78 additions & 73 deletions tests/integration/sharding_tests/test_sharding.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,8 @@

from .helpers import (
generate_mongodb_client,
get_cluster_shards,
get_databases_for_shard,
has_correct_shards,
shard_has_databases,
verify_data_mongodb,
write_data_to_mongodb,
)
Expand All @@ -31,20 +31,23 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None:
my_charm = await ops_test.build_charm(".")
await ops_test.model.deploy(
my_charm,
num_units=2,
num_units=1,
config={"role": "config-server"},
application_name=CONFIG_SERVER_APP_NAME,
)
await ops_test.model.deploy(
my_charm, num_units=2, config={"role": "shard"}, application_name=SHARD_ONE_APP_NAME
my_charm, num_units=1, config={"role": "shard"}, application_name=SHARD_ONE_APP_NAME
)
await ops_test.model.deploy(
my_charm, num_units=2, config={"role": "shard"}, application_name=SHARD_TWO_APP_NAME
my_charm, num_units=1, config={"role": "shard"}, application_name=SHARD_TWO_APP_NAME
)
await ops_test.model.deploy(
my_charm, num_units=1, config={"role": "shard"}, application_name=SHARD_THREE_APP_NAME
)

async with ops_test.fast_forward():
await ops_test.model.wait_for_idle(
apps=[CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME],
apps=[CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_THREE_APP_NAME],
idle_period=20,
raise_on_blocked=False,
timeout=TIMEOUT,
Expand All @@ -66,24 +69,33 @@ async def test_cluster_active(ops_test: OpsTest) -> None:
f"{SHARD_TWO_APP_NAME}:{SHARD_REL_NAME}",
f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}",
)
await ops_test.model.integrate(
f"{SHARD_THREE_APP_NAME}:{SHARD_REL_NAME}",
f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}",
)

async with ops_test.fast_forward():
await ops_test.model.wait_for_idle(
apps=[CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME],
apps=[
CONFIG_SERVER_APP_NAME,
SHARD_ONE_APP_NAME,
SHARD_TWO_APP_NAME,
SHARD_THREE_APP_NAME,
],
idle_period=20,
status="active",
timeout=TIMEOUT,
raise_on_error=False,
)

# verify sharded cluster config
mongos_client = await generate_mongodb_client(
ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True
)
shard_names = get_cluster_shards(mongos_client)
expected_shard_names = [SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME]
assert shard_names == set(
expected_shard_names

# verify sharded cluster config
assert has_correct_shards(
mongos_client,
expected_shards=[SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME, SHARD_THREE_APP_NAME],
), "Config server did not process config properly"

# TODO Future PR: assert that CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME
Expand All @@ -98,49 +110,49 @@ async def test_sharding(ops_test: OpsTest) -> None:
ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True
)

# write data to shard one
# write data to shard two
write_data_to_mongodb(
mongos_client,
db_name="animals_database_1",
coll_name="horses",
content={"horse-breed": "unicorn", "real": True},
)
mongos_client.admin.command("movePrimary", "animals_database_1", to=SHARD_ONE_APP_NAME)
mongos_client.admin.command("movePrimary", "animals_database_1", to=SHARD_TWO_APP_NAME)

# write data to shard two
# write data to shard three
write_data_to_mongodb(
mongos_client,
db_name="animals_database_2",
coll_name="horses",
content={"horse-breed": "pegasus", "real": True},
)
mongos_client.admin.command("movePrimary", "animals_database_2", to=SHARD_TWO_APP_NAME)
mongos_client.admin.command("movePrimary", "animals_database_2", to=SHARD_THREE_APP_NAME)

# log into shard 1 verify data
shard_one_client = await generate_mongodb_client(
ops_test, app_name=SHARD_ONE_APP_NAME, mongos=False
# log into shard two verify data
shard_two_client = await generate_mongodb_client(
ops_test, app_name=SHARD_TWO_APP_NAME, mongos=False
)
has_correct_data = verify_data_mongodb(
shard_one_client,
shard_two_client,
db_name="animals_database_1",
coll_name="horses",
key="horse-breed",
value="unicorn",
)
assert has_correct_data, "data not written to shard-one"
assert has_correct_data, "data not written to shard-two"

# log into shard 2 verify data
shard_two_client = await generate_mongodb_client(
ops_test, app_name=SHARD_TWO_APP_NAME, mongos=False
shard_three_client = await generate_mongodb_client(
ops_test, app_name=SHARD_THREE_APP_NAME, mongos=False
)
has_correct_data = verify_data_mongodb(
shard_two_client,
shard_three_client,
db_name="animals_database_2",
coll_name="horses",
key="horse-breed",
value="pegasus",
)
assert has_correct_data, "data not written to shard-two"
assert has_correct_data, "data not written to shard-three"


async def test_shard_removal(ops_test: OpsTest) -> None:
Expand All @@ -151,29 +163,6 @@ async def test_shard_removal(ops_test: OpsTest) -> None:
- The balancer is turned back on if turned off.
- Config server supports removing multiple shards.
"""
# add a third shard, so that we can remove two shards at a time.
my_charm = await ops_test.build_charm(".")
await ops_test.model.deploy(
my_charm, num_units=1, config={"role": "shard"}, application_name=SHARD_THREE_APP_NAME
)
await ops_test.model.integrate(
f"{SHARD_THREE_APP_NAME}:{SHARD_REL_NAME}",
f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}",
)

await ops_test.model.wait_for_idle(
apps=[
CONFIG_SERVER_APP_NAME,
SHARD_ONE_APP_NAME,
SHARD_TWO_APP_NAME,
SHARD_THREE_APP_NAME,
],
idle_period=20,
status="active",
timeout=TIMEOUT,
raise_on_error=False,
)

# turn off balancer.
mongos_client = await generate_mongodb_client(
ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True
Expand All @@ -184,17 +173,22 @@ async def test_shard_removal(ops_test: OpsTest) -> None:

# remove two shards at the same time
await ops_test.model.applications[CONFIG_SERVER_APP_NAME].remove_relation(
f"{SHARD_ONE_APP_NAME}:{SHARD_REL_NAME}",
f"{SHARD_TWO_APP_NAME}:{SHARD_REL_NAME}",
f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}",
)
await ops_test.model.applications[CONFIG_SERVER_APP_NAME].remove_relation(
f"{SHARD_TWO_APP_NAME}:{SHARD_REL_NAME}",
f"{SHARD_THREE_APP_NAME}:{SHARD_REL_NAME}",
f"{CONFIG_SERVER_APP_NAME}:{CONFIG_SERVER_REL_NAME}",
)

async with ops_test.fast_forward():
await ops_test.model.wait_for_idle(
apps=[CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME],
apps=[
CONFIG_SERVER_APP_NAME,
SHARD_ONE_APP_NAME,
SHARD_TWO_APP_NAME,
SHARD_THREE_APP_NAME,
],
idle_period=20,
status="active",
timeout=TIMEOUT,
Expand All @@ -207,19 +201,16 @@ async def test_shard_removal(ops_test: OpsTest) -> None:
balancer_state = mongos_client.admin.command("balancerStatus")
assert balancer_state["mode"] != "off", "balancer not turned back on from config server"

# verify sharded cluster config
shard_names = get_cluster_shards(mongos_client)
expected_shard_names = [SHARD_THREE_APP_NAME]
assert shard_names == set(
expected_shard_names
# verify sharded cluster config
assert has_correct_shards(
mongos_client, expected_shards=[SHARD_ONE_APP_NAME]
), "Config server did not process config properly"

# verify databases that had primaries shard-one and shard-two are now on shard-three
databases_on_shard = get_databases_for_shard(mongos_client, shard_name=SHARD_THREE_APP_NAME)
expected_databases_on_shard = ["animals_database_1", "animals_database_2"]
assert databases_on_shard, "No databases on the final shard."
assert set(databases_on_shard) == set(
expected_databases_on_shard
# verify no data lost
assert shard_has_databases(
mongos_client,
shard_name=SHARD_ONE_APP_NAME,
expected_databases_on_shard=["animals_database_1", "animals_database_2"],
), "Not all databases on final shard"


Expand Down Expand Up @@ -258,6 +249,22 @@ async def test_removal_of_non_primary_shard(ops_test: OpsTest):
raise_on_error=False,
)

mongos_client = await generate_mongodb_client(
ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True
)

# verify sharded cluster config
assert has_correct_shards(
mongos_client, expected_shards=[SHARD_ONE_APP_NAME]
), "Config server did not process config properly"

# verify no data lost
assert shard_has_databases(
mongos_client,
shard_name=SHARD_ONE_APP_NAME,
expected_databases_on_shard=["animals_database_1", "animals_database_2"],
), "Not all databases on final shard"


async def test_unconventual_shard_removal(ops_test: OpsTest):
"""Tests that removing a shard application safely drains data.
Expand All @@ -273,7 +280,7 @@ async def test_unconventual_shard_removal(ops_test: OpsTest):

async with ops_test.fast_forward():
await ops_test.model.wait_for_idle(
apps=[CONFIG_SERVER_APP_NAME, SHARD_TWO_APP_NAME, SHARD_THREE_APP_NAME],
apps=[CONFIG_SERVER_APP_NAME, SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME],
idle_period=20,
status="active",
timeout=TIMEOUT,
Expand All @@ -282,20 +289,18 @@ async def test_unconventual_shard_removal(ops_test: OpsTest):

ops_test.model.remove_application(SHARD_TWO_APP_NAME, block_until_done=True)

# verify sharded cluster config
mongos_client = await generate_mongodb_client(
ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True
)
shard_names = get_cluster_shards(mongos_client)
expected_shard_names = [SHARD_THREE_APP_NAME]
assert shard_names == set(
expected_shard_names

# verify sharded cluster config
assert has_correct_shards(
mongos_client, expected_shards=[SHARD_ONE_APP_NAME]
), "Config server did not process config properly"

# verify no data lost
databases_on_shard = get_databases_for_shard(mongos_client, shard_name=SHARD_THREE_APP_NAME)
expected_databases_on_shard = ["animals_database_1", "animals_database_2"]
assert databases_on_shard, "No databases on the final shard."
assert set(databases_on_shard) == set(
expected_databases_on_shard
assert shard_has_databases(
mongos_client,
shard_name=SHARD_ONE_APP_NAME,
expected_databases_on_shard=["animals_database_1", "animals_database_2"],
), "Not all databases on final shard"

0 comments on commit eac5377

Please sign in to comment.