update tests
danlessa committed Apr 4, 2024
1 parent 8b73834 commit 9fc2ffc
Showing 3 changed files with 88 additions and 11 deletions.
20 changes: 13 additions & 7 deletions testing/test_param_count.py
@@ -3,16 +3,21 @@
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
import pytest


P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3}
P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]}
P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]}
P_all_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1, 2, 3]}
P_all_but_one_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1]}
Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp, P_all_but_one_swp]

-CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
-                             (3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
-                             (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]
+CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3),
+                             (1, 3, 3, 3, 3),
+                             (3, 1, 3, 3, 3),
+                             (1, 1, 3, 3, 3),
+                             (3, 3, 1, 3, 3),
+                             (1, 3, 1, 3, 3),
+                             (1, 1, 1, 3, 3)]


def run_experiment(exp: Experiment, mode: str):
@@ -35,10 +40,12 @@ def p_test_param_count(params, _2, _3, _4):
return {'sigA': None}
return p_test_param_count


def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment:

INITIAL_STATE = {'varA': None}
-    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps
+    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(
+        params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps

SIM_CONFIG = config_sim(
{
@@ -58,8 +65,8 @@ def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_s
return exp


-def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps,P) -> int:
-    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)
+def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps, P) -> int:
+    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
@@ -69,7 +76,6 @@ def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s, P):
len(run_experiment(create_experiments(*args), 'single_proc'))



@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
@pytest.mark.parametrize("P", Ps)
def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s, P):
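For reference, the expected_rows helper in this file encodes the size of a cadCAD result set; a minimal sketch of the arithmetic, dropping the P parameter (which the version shown does not use) and assuming the +1 covers the initial-state row at timestep 0 that each run contributes:

    def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps):
        # One initial-state row plus N_timesteps * N_substeps rows per run.
        return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)

    assert expected_rows(3, 3, 3, 3, 3) == 270  # 3 * 3 * 3 * (3 * 3 + 1)
    assert expected_rows(1, 1, 1, 3, 3) == 10   # 1 * 1 * 1 * (3 * 3 + 1)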
70 changes: 70 additions & 0 deletions testing/test_results_signature.py
@@ -0,0 +1,70 @@
from cadCAD.configuration import Experiment
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
import pytest
import pandas as pd # type: ignore
from typing import Dict, List

# (N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps)


CONFIG_SIGNATURES_TO_TEST = [
(1, 20, 5, 10, 5), (3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
(3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
(3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]


def run_experiment(exp: Experiment, mode: str) -> List[Dict]:
exec_context = ExecutionContext(mode)
executor = Executor(exec_context=exec_context, configs=exp.configs)
(records, tensor_field, _) = executor.execute()
return records


def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3) -> Experiment:

INITIAL_STATE = {'varA': None}
PSUBs = [{'policies': {}, 'variables': {}}] * N_substeps
params = {'A': [None] * N_sweeps,
'B': [None]}

SIM_CONFIG = config_sim(
{
"N": N_runs,
"T": range(N_timesteps),
"M": params, # Optional
}
)

exp = Experiment()
for i_sim in range(N_simulations):
exp.append_model(
sim_configs=SIM_CONFIG,
initial_state=INITIAL_STATE,
partial_state_update_blocks=PSUBs
)
return exp


def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> int:
return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
def test_identifiers_value_counts_single(N_sim, N_sw, N_r, N_t, N_s):
args = (N_sim, N_sw, N_r, N_t, N_s)
results = run_experiment(create_experiments(*args), 'single_proc')
df = pd.DataFrame(results).query("timestep > 0")
assert len(set(df.timestep.value_counts().values)) == 1
assert len(set(df.subset.value_counts().values)) == 1
assert len(set(df.run.value_counts().values)) == 1


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST[:-1])
def test_identifiers_value_counts_multi(N_sim, N_sw, N_r, N_t, N_s):
args = (N_sim, N_sw, N_r, N_t, N_s)
results = run_experiment(create_experiments(*args), 'multi_proc')
df = pd.DataFrame(results).query("timestep > 0")
assert len(set(df.timestep.value_counts().values)) == 1
assert len(set(df.subset.value_counts().values)) == 1
assert len(set(df.run.value_counts().values)) == 1
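The value_counts assertions above encode a balance invariant: every timestep, subset, and run label should appear the same number of times among the post-initial rows. A small self-contained sketch of that check on toy data (the frame below is illustrative, not actual cadCAD output):

    import pandas as pd

    # Toy, balanced result frame: 2 runs x 2 timesteps, a single subset.
    df = pd.DataFrame({
        'run':      [1, 1, 2, 2],
        'timestep': [1, 2, 1, 2],
        'subset':   [0, 0, 0, 0],
    })

    # Same style of check as in the tests: when every label occurs equally often,
    # the set of its value counts collapses to a single number.
    assert len(set(df.timestep.value_counts().values)) == 1
    assert len(set(df.subset.value_counts().values)) == 1
    assert len(set(df.run.value_counts().values)) == 1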
9 changes: 5 additions & 4 deletions testing/test_row_count.py
@@ -2,13 +2,14 @@
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
import pytest

import pandas as pd # type: ignore
from typing import Dict, List

CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
(3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
(3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]

-def run_experiment(exp: Experiment, mode: str):
+def run_experiment(exp: Experiment, mode: str) -> List[Dict]:
exec_context = ExecutionContext(mode)
executor = Executor(exec_context=exec_context, configs=exp.configs)
(records, tensor_field, _) = executor.execute()
@@ -44,11 +45,11 @@ def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> i
return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)



@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s):
args = (N_sim, N_sw, N_r, N_t, N_s)
-    assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args)
+    results = run_experiment(create_experiments(*args), 'single_proc')
+    assert len(results) == expected_rows(*args)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
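To reproduce the row-count check outside pytest, the helpers in this file can be called directly. A minimal sketch, assuming cadCAD is installed and that the module is importable under the testing path shown in the file header (an assumption, not part of the commit):

    # Hypothetical standalone run; the import path is assumed, not taken from the commit.
    from testing.test_row_count import create_experiments, run_experiment, expected_rows

    args = (1, 1, 1, 3, 3)  # (N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps)
    records = run_experiment(create_experiments(*args), 'single_proc')
    assert len(records) == expected_rows(*args)  # 1 * 1 * 1 * (3 * 3 + 1) = 10 rows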
