
Add supress_print attribute to the Executor class for silencing prints and tqdm #361

Merged · 9 commits · Apr 19, 2024
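Summary: this PR threads a `supress_print` flag (default `False`) through the engine. `Executor` and `easy_run` accept it, the banner, "Execution Method", and timing prints are gated behind it, both `tqdm` progress bars receive `disable=supress_print`, and the unconditional "Execution Mode" prints are removed from `execution.py`. Two test modules are added (`testing/test_print.py`, `testing/test_results_signature.py`) and the existing tests get formatting cleanups.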
23 changes: 15 additions & 8 deletions cadCAD/engine/__init__.py
@@ -70,7 +70,7 @@ def distroduce_proc(

class Executor:
def __init__(self,
-                 exec_context: ExecutionContext, configs: List[Configuration], sc=None, empty_return=False
+                 exec_context: ExecutionContext, configs: List[Configuration], sc=None, empty_return=False, supress_print=False
) -> None:
self.sc = sc
self.SimExecutor = SimExecutor
@@ -79,6 +79,7 @@ def __init__(self,
self.additional_objs = exec_context.additional_objs
self.configs = configs
self.empty_return = empty_return
+        self.supress_print = supress_print

def execute(self) -> Tuple[object, object, Dict[str, object]]:
if self.empty_return is True:
@@ -97,12 +98,14 @@ def execute(self) -> Tuple[object, object, Dict[str, object]]:
config_idx = 0

# Execution Info
-        print_exec_info(self.exec_context, configs_as_objs(self.configs))
+        if self.supress_print is False:
+            print_exec_info(self.exec_context, configs_as_objs(self.configs))

t1 = time()
for x in tqdm(self.configs,
total=len(self.configs),
desc="Initializing configurations"):
desc="Initializing configurations",
disable=self.supress_print):
sessions.append(
{
'user_id': x.user_id, 'experiment_id': x.experiment_id, 'session_id': x.session_id,
@@ -180,7 +183,8 @@ def get_final_results(simulations: List[StateHistory],
flat_timesteps, tensor_fields = [], []
for sim_result, psu, ep in tqdm(list(zip(simulations, psus, eps)),
total=len(simulations),
-                                        desc='Flattening results'):
+                                        desc='Flattening results',
+                                        disable=self.supress_print):
if do_flatten:
flat_timesteps.append(flatten(sim_result))
tensor_fields.append(create_tensor_field(psu, ep))
@@ -209,8 +213,9 @@ def get_final_results(simulations: List[StateHistory],
else:
raise ValueError("Invalid execution mode specified")


print("Execution Method: " + self.exec_method.__name__)
if self.supress_print is False:
print("Execution Method: " + self.exec_method.__name__)

simulations_results = self.exec_method(
sim_executors, var_dict_list, states_lists, configs_structs, env_processes_list, Ts, SimIDs, RunIDs,
ExpIDs, SubsetIDs, SubsetWindows, original_N, self.additional_objs
@@ -219,7 +224,8 @@ def get_final_results(simulations: List[StateHistory],
final_result = get_final_results(
simulations_results, partial_state_updates, eps, sessions, remote_threshold)
elif self.exec_context == ExecutionMode.distributed:
print("Execution Method: " + self.exec_method.__name__)
if self.supress_print is False:
print("Execution Method: " + self.exec_method.__name__)
simulations_results = self.exec_method(
sim_executors, var_dict_list, states_lists, configs_structs, env_processes_list, Ts,
SimIDs, RunIDs, ExpIDs, SubsetIDs, SubsetWindows, original_N, self.sc
@@ -228,6 +234,7 @@ def get_final_results(simulations: List[StateHistory],
simulations_results, partial_state_updates, eps, sessions)

t2 = time()
print(f"Total execution time: {t2 - t1 :.2f}s")
if self.supress_print is False:
print(f"Total execution time: {t2 - t1 :.2f}s")

return final_result
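For reference, a minimal usage sketch for the new flag (not part of the diff): `exp` is assumed to be an `Experiment` configured elsewhere, as in the tests further down this page.

```python
# Minimal sketch: silencing all Executor output with supress_print.
# Assumes `exp` is an Experiment built via the usual config_sim /
# append_model flow; only the supress_print argument is new in this PR.
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode

exec_context = ExecutionContext(ExecutionMode().local_mode)
executor = Executor(exec_context=exec_context,
                    configs=exp.configs,
                    supress_print=True)  # gates banner, tqdm bars, and timing prints
records, tensor_field, sessions = executor.execute()
```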
2 changes: 0 additions & 2 deletions cadCAD/engine/execution.py
@@ -35,7 +35,6 @@ def single_proc_exec(
Ts, SimIDs, Ns, SubsetIDs, SubsetWindows, var_dict_list)

results: List = []
-    print(f'Execution Mode: single_threaded')
for raw_param in zip(*raw_params):
simulation_exec, states_list, config, env_processes, T, sim_id, N, subset_id, subset_window, var_dict = raw_param
result = simulation_exec(
@@ -60,7 +59,6 @@ def parallelize_simulations(
additional_objs=None
):

-    print(f'Execution Mode: parallelized')
params = list(
zip(
simulation_execs, var_dict_list, states_lists, configs_structs, env_processes_list,
3 changes: 2 additions & 1 deletion cadCAD/tools/execution/easy_run.py
@@ -44,6 +44,7 @@ def easy_run(
drop_substeps=True,
exec_mode='local',
deepcopy_off=False,
+    supress_print=False
) -> pd.DataFrame:
"""
Run cadCAD simulations without headaches.
@@ -69,7 +70,7 @@
elif exec_mode == 'single':
_exec_mode = ExecutionMode().single_mode
exec_context = ExecutionContext(_exec_mode, additional_objs={'deepcopy_off': deepcopy_off})
-    executor = Executor(exec_context=exec_context, configs=configs)
+    executor = Executor(exec_context=exec_context, configs=configs, supress_print=supress_print)

# Execute the cadCAD experiment
(records, tensor_field, _) = executor.execute()
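And a minimal sketch of a fully silent `easy_run` call (not part of the diff). The leading positional arguments are assumptions based on `easy_run`'s usual signature; only `supress_print` is introduced by this PR.

```python
# Hypothetical example: a silent easy_run invocation.
from cadCAD.tools.execution.easy_run import easy_run

df = easy_run(
    state_variables,     # hypothetical: initial state dict
    params,              # hypothetical: parameter dict
    psubs,               # hypothetical: partial state update blocks
    100,                 # hypothetical: number of timesteps
    1,                   # hypothetical: number of samples
    exec_mode='single',
    supress_print=True,  # no banner, no tqdm bars, no timing output
)
```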
20 changes: 13 additions & 7 deletions testing/test_param_count.py
@@ -3,16 +3,21 @@
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
import pytest


P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3}
P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]}
P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]}
P_all_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1, 2, 3]}
P_all_but_one_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1]}
Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp, P_all_but_one_swp]

-CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
-                             (3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
-                             (3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]
+CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3),
+                             (1, 3, 3, 3, 3),
+                             (3, 1, 3, 3, 3),
+                             (1, 1, 3, 3, 3),
+                             (3, 3, 1, 3, 3),
+                             (1, 3, 1, 3, 3),
+                             (1, 1, 1, 3, 3)]


def run_experiment(exp: Experiment, mode: str):
@@ -35,10 +40,12 @@ def p_test_param_count(params, _2, _3, _4):
return {'sigA': None}
return p_test_param_count


def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment:

INITIAL_STATE = {'varA': None}
-    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps
+    PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(
+        params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps

SIM_CONFIG = config_sim(
{
@@ -58,8 +65,8 @@ def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment:
return exp


-def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps,P) -> int:
-    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)
+def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps, P) -> int:
+    return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
Expand All @@ -69,7 +76,6 @@ def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s, P):
len(run_experiment(create_experiments(*args), 'single_proc'))



@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
@pytest.mark.parametrize("P", Ps)
def test_row_count_local(N_sim, N_sw, N_r, N_t, N_s, P):
77 changes: 77 additions & 0 deletions testing/test_print.py
@@ -0,0 +1,77 @@
from cadCAD.configuration import Experiment
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
import pytest

P_no_lst = {'pA': 1, 'pB': 2, 'pC': 3}
P_single_lst = {'pA': [1], 'pB': [1], 'pC': [3]}
P_single_swp = {'pA': [4, 5, 6], 'pB': [1], 'pC': [3]}
P_all_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1, 2, 3]}
P_all_but_one_swp = {'pA': [7, 8, 9], 'pB': [1, 2, 3], 'pC': [1]}
Ps = [P_no_lst, P_single_lst, P_single_swp, P_all_swp, P_all_but_one_swp]

CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3),
(1, 3, 3, 3, 3),
(3, 1, 3, 3, 3),
(1, 1, 3, 3, 3),
(3, 3, 1, 3, 3),
(1, 3, 1, 3, 3),
(1, 1, 1, 3, 3)]


def run_experiment(exp: Experiment, mode: str, supress_print=False):
exec_context = ExecutionContext(mode)
executor = Executor(exec_context=exec_context, configs=exp.configs, supress_print=supress_print)
(records, tensor_field, _) = executor.execute()
return records


def param_count_test_suf_generator(provided_params):
def s_test_param_count(params, _2, _3, _4, _5):
assert params.keys() == provided_params.keys(), 'Params are not matching'
return ('varA', None)
return s_test_param_count


def param_count_test_policy_generator(provided_params):
def p_test_param_count(params, _2, _3, _4):
assert params.keys() == provided_params.keys(), 'Params are not matching'
return {'sigA': None}
return p_test_param_count


def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={}) -> Experiment:

INITIAL_STATE = {'varA': None}
PSUBs = [{'policies': {'sigA': param_count_test_policy_generator(
params)}, 'variables': {'varA': param_count_test_suf_generator(params)}}] * N_substeps

SIM_CONFIG = config_sim(
{
"N": N_runs,
"T": range(N_timesteps),
"M": params, # Optional
}
)

exp = Experiment()
for i_sim in range(N_simulations):
exp.append_model(
sim_configs=SIM_CONFIG,
initial_state=INITIAL_STATE,
partial_state_update_blocks=PSUBs
)
return exp



def test_print(capfd):
exp = run_experiment(create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={'a': 0}), 'single_proc', supress_print=False)
out, err = capfd.readouterr()
assert " ___________ ____\n ________ __ ___/ / ____/ | / __ \\\n / ___/ __` / __ / / / /| | / / / /\n/ /__/ /_/ / /_/ / /___/ ___ |/ /_/ /\n\\___/\\__,_/\\__,_/\\____/_/ |_/_____/\nby cadCAD" in out
assert 'Initializing configurations' in err

exp = run_experiment(create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3, params={'a': 0}), 'single_proc', supress_print=True)
out, err = capfd.readouterr()
assert out == ''
assert err == ''
70 changes: 70 additions & 0 deletions testing/test_results_signature.py
@@ -0,0 +1,70 @@
from cadCAD.configuration import Experiment
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
import pytest
import pandas as pd # type: ignore
from typing import Dict, List

# (N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps)


CONFIG_SIGNATURES_TO_TEST = [
(1, 20, 5, 10, 5), (3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
(3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
(3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]


def run_experiment(exp: Experiment, mode: str) -> List[Dict]:
exec_context = ExecutionContext(mode)
executor = Executor(exec_context=exec_context, configs=exp.configs)
(records, tensor_field, _) = executor.execute()
return records


def create_experiments(N_simulations=3, N_sweeps=3, N_runs=3, N_timesteps=3, N_substeps=3) -> Experiment:

INITIAL_STATE = {'varA': None}
PSUBs = [{'policies': {}, 'variables': {}}] * N_substeps
params = {'A': [None] * N_sweeps,
'B': [None]}

SIM_CONFIG = config_sim(
{
"N": N_runs,
"T": range(N_timesteps),
"M": params, # Optional
}
)

exp = Experiment()
for i_sim in range(N_simulations):
exp.append_model(
sim_configs=SIM_CONFIG,
initial_state=INITIAL_STATE,
partial_state_update_blocks=PSUBs
)
return exp


def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> int:
return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
def test_identifiers_value_counts_single(N_sim, N_sw, N_r, N_t, N_s):
args = (N_sim, N_sw, N_r, N_t, N_s)
results = run_experiment(create_experiments(*args), 'single_proc')
df = pd.DataFrame(results).query("timestep > 0")
assert len(set(df.timestep.value_counts().values)) == 1
assert len(set(df.subset.value_counts().values)) == 1
assert len(set(df.run.value_counts().values)) == 1


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST[:-1])
def test_identifiers_value_counts_multi(N_sim, N_sw, N_r, N_t, N_s):
args = (N_sim, N_sw, N_r, N_t, N_s)
results = run_experiment(create_experiments(*args), 'multi_proc')
df = pd.DataFrame(results).query("timestep > 0")
assert len(set(df.timestep.value_counts().values)) == 1
assert len(set(df.subset.value_counts().values)) == 1
assert len(set(df.run.value_counts().values)) == 1
9 changes: 5 additions & 4 deletions testing/test_row_count.py
@@ -2,13 +2,14 @@
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import Executor, ExecutionContext, ExecutionMode
import pytest

+import pandas as pd  # type: ignore
+from typing import Dict, List

CONFIG_SIGNATURES_TO_TEST = [(3, 3, 3, 3, 3), (1, 3, 3, 3, 3),
(3, 1, 3, 3, 3), (1, 1, 3, 3, 3),
(3, 3, 1, 3, 3), (1, 3, 1, 3, 3), (1, 1, 1, 3, 3)]

-def run_experiment(exp: Experiment, mode: str):
+def run_experiment(exp: Experiment, mode: str) -> List[Dict]:
exec_context = ExecutionContext(mode)
executor = Executor(exec_context=exec_context, configs=exp.configs)
(records, tensor_field, _) = executor.execute()
@@ -44,11 +45,11 @@ def expected_rows(N_simulations, N_sweeps, N_runs, N_timesteps, N_substeps) -> int:
return N_simulations * N_sweeps * N_runs * (N_timesteps * N_substeps + 1)



@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)
def test_row_count_single(N_sim, N_sw, N_r, N_t, N_s):
args = (N_sim, N_sw, N_r, N_t, N_s)
-    assert len(run_experiment(create_experiments(*args), 'single_proc')) == expected_rows(*args)
+    results = run_experiment(create_experiments(*args), 'single_proc')
+    assert len(results) == expected_rows(*args)


@pytest.mark.parametrize("N_sim,N_sw,N_r,N_t,N_s", CONFIG_SIGNATURES_TO_TEST)