From 326bc538912e6b97d2eaab9efb971abbaaee6640 Mon Sep 17 00:00:00 2001 From: John Harwell Date: Fri, 4 Oct 2024 13:42:29 -0500 Subject: [PATCH] refactor(#317): Better design - Continue moving things over to the new PathSet() paradigm - I MAY want to have the batch criteria own its own pathset; not sure if that's a good idea or not. Will revisit. --- .../tutorials/project/template_input_file.rst | 8 +- sierra/core/batchroot.py | 26 ++-- sierra/core/experiment/bindings.py | 4 +- sierra/core/experiment/spec.py | 9 +- sierra/core/exproot.py | 29 ++++ sierra/core/generators/exp_creator.py | 21 +-- sierra/core/generators/exp_generators.py | 20 +-- sierra/core/generators/generator_factory.py | 8 +- sierra/core/models/interface.py | 11 +- .../core/pipeline/stage1/pipeline_stage1.py | 6 +- sierra/core/pipeline/stage2/exp_runner.py | 22 +-- sierra/core/pipeline/stage3/collate.py | 4 +- .../core/pipeline/stage3/pipeline_stage3.py | 3 +- sierra/core/pipeline/stage4/graphs/collate.py | 31 ++-- .../pipeline/stage4/graphs/inter/generate.py | 6 +- .../pipeline/stage4/graphs/inter/heatmap.py | 2 +- .../core/pipeline/stage4/graphs/inter/line.py | 8 +- .../pipeline/stage4/graphs/intra/generate.py | 52 ++++--- sierra/core/pipeline/stage4/model_runner.py | 45 +++--- sierra/core/pipeline/stage4/render.py | 22 +-- .../stage5/inter_scenario_comparator.py | 12 +- .../stage5/intra_scenario_comparator.py | 135 ++++++++---------- sierra/core/platform.py | 6 +- sierra/core/ros1/callbacks.py | 2 +- sierra/core/utils.py | 22 ++- sierra/core/variables/batch_criteria.py | 106 +++++++------- sierra/core/variables/population_size.py | 6 +- sierra/plugins/platform/argos/cmdline.py | 2 +- .../argos/generators/platform_generators.py | 16 +-- sierra/plugins/platform/argos/plugin.py | 13 +- .../argos/variables/physics_engines.py | 8 +- .../variables/population_constant_density.py | 24 ++-- .../argos/variables/population_size.py | 7 +- .../variables/population_variable_density.py | 22 +-- 
sierra/plugins/platform/ros1gazebo/plugin.py | 15 +- .../ros1gazebo/variables/population_size.py | 7 +- .../generators/platform_generators.py | 4 +- sierra/plugins/platform/ros1robot/plugin.py | 33 ++--- .../ros1robot/variables/population_size.py | 9 +- 39 files changed, 393 insertions(+), 393 deletions(-) create mode 100644 sierra/core/exproot.py diff --git a/docs/src/tutorials/project/template_input_file.rst b/docs/src/tutorials/project/template_input_file.rst index 16231306..8ba8801d 100644 --- a/docs/src/tutorials/project/template_input_file.rst +++ b/docs/src/tutorials/project/template_input_file.rst @@ -165,7 +165,7 @@ Any of the following may be inserted: - + ... @@ -189,7 +189,7 @@ Any of the following may be inserted: - + ... @@ -224,7 +224,7 @@ Any of the following may be inserted: - + ... @@ -247,7 +247,7 @@ Any of the following may be inserted: - + ... diff --git a/sierra/core/batchroot.py b/sierra/core/batchroot.py index 0cc7a800..d5640904 100644 --- a/sierra/core/batchroot.py +++ b/sierra/core/batchroot.py @@ -94,18 +94,18 @@ def to_path(self) -> pathlib.Path: class PathSet(): def __init__(self, root: ExpRoot) -> None: - self.root = root - self.input_root = self.root.to_path() / "exp-inputs" - self.output_root = self.root.to_path() / "exp-outputs" - self.graph_root = self.root.to_path() / "graphs" - self.model_root = self.root.to_path() / "models" - self.stat_root = self.root.to_path() / "statistics" + self.input_root = root.to_path() / "exp-inputs" + self.output_root = root.to_path() / "exp-outputs" + self.graph_root = root.to_path() / "graphs" + self.model_root = root.to_path() / "models" + self.stat_root = root.to_path() / "statistics" self.stat_exec_root = self.stat_root.to_path() / "exec" - self.imagize_root = self.root.to_path() / "imagize" - self.video_root = self.root.to_path() / "videos" - self.stat_collate = self.stat_root.to_path() / "collated" - self.graph_collate = self.graph_root.to_path() / "collated" - self.scratch_root = 
self.root.to_path() / "scratch" + self.imagize_root = root.to_path() / "imagize" + self.video_root = root.to_path() / "videos" + self.stat_collate_root = self.stat_root.to_path() / "collated" + self.graph_collate_root = self.graph_root.to_path() / "collated" + self.scratch_root = root.to_path() / "scratch" + self.root = root.to_path() def from_cmdline(args: argparse.Namespace) -> PathSet: @@ -128,7 +128,7 @@ def from_cmdline(args: argparse.Namespace) -> PathSet: args.controller) -def from_exp(sierra_rpath: str, +def from_exp(sierra_root: str, project: str, batch_leaf: ExpRootLeaf, controller: str) -> PathSet: @@ -150,7 +150,7 @@ def from_exp(sierra_rpath: str, controller: The name of the controller used. """ - root = ExpRoot(sierra_rpath, + root = ExpRoot(sierra_root, project, controller, batch_leaf) diff --git a/sierra/core/experiment/bindings.py b/sierra/core/experiment/bindings.py index 997a8057..122e678c 100644 --- a/sierra/core/experiment/bindings.py +++ b/sierra/core/experiment/bindings.py @@ -135,7 +135,7 @@ class IExpRunShellCmdsGenerator(implements.Interface): cmdopts: Dictionary of parsed cmdline options. - n_robots: The configured # of robots for the experimental run. + n_agents: The configured # of robots for the experimental run. exp_num: The 0-based index of the experiment in the batch. 
@@ -144,7 +144,7 @@ class IExpRunShellCmdsGenerator(implements.Interface): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: raise NotImplementedError diff --git a/sierra/core/experiment/spec.py b/sierra/core/experiment/spec.py index 5b1dc1ac..021e5b8d 100755 --- a/sierra/core/experiment/spec.py +++ b/sierra/core/experiment/spec.py @@ -33,7 +33,7 @@ def __init__(self, self.mods = [] self.is_compound = False - assert len(self.rms) == 0,\ + assert len(self.rms) == 0, \ "Batch criteria cannot remove XML tags" if self.chgs: @@ -79,7 +79,7 @@ def __init__(self, self.is_compound = True self.mods = [] - assert len(self.rms) == 0,\ + assert len(self.rms) == 0, \ "Batch criteria cannot remove XML tags" if self.chgs and self.adds: @@ -125,12 +125,13 @@ class ExperimentSpec(): def __init__(self, criteria: bc.IConcreteBatchCriteria, + batch_input_root: pathlib.Path, exp_num: int, cmdopts: types.Cmdopts) -> None: self.exp_num = exp_num - exp_name = criteria.gen_exp_names(cmdopts)[exp_num] + exp_name = criteria.gen_exp_names()[exp_num] - self.exp_input_root = pathlib.Path(cmdopts['batch_input_root'], exp_name) + self.exp_input_root = batch_input_root / exp_name self.exp_def_fpath = self.exp_input_root / config.kPickleLeaf self.logger = logging.getLogger(__name__) diff --git a/sierra/core/exproot.py b/sierra/core/exproot.py new file mode 100644 index 00000000..305f724b --- /dev/null +++ b/sierra/core/exproot.py @@ -0,0 +1,29 @@ +# +# Copyright 2024 John Harwell, All rights reserved. 
+# SPDX-License-Identifier: MIT
all(spec.shell for spec in pre_specs), \ "All pre-exp commands are run in a shell" pre_cmds = [spec.cmd for spec in pre_specs] self.logger.trace("Pre-experiment cmds: %s", pre_cmds) # type: ignore @@ -237,13 +237,13 @@ def _update_cmds_file(self, exec_specs = cmds_generator.exec_run_cmds(for_host, launch_stem_path, run_num) - assert all(spec.shell for spec in exec_specs),\ + assert all(spec.shell for spec in exec_specs), \ "All exec-exp commands are run in a shell" exec_cmds = [spec.cmd for spec in exec_specs] self.logger.trace("Exec-experiment cmds: %s", exec_cmds) # type: ignore post_specs = cmds_generator.post_run_cmds(for_host) - assert all(spec.shell for spec in post_specs),\ + assert all(spec.shell for spec in post_specs), \ "All post-exp commands are run in a shell" post_cmds = [spec.cmd for spec in post_specs] self.logger.trace("Post-experiment cmds: %s", post_cmds) # type: ignore @@ -298,11 +298,12 @@ class BatchExpCreator: def __init__(self, criteria: bc.BatchCriteria, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.batch_config_template = pathlib.Path(cmdopts['template_input_file']) - self.batch_input_root = pathlib.Path(cmdopts['batch_input_root']) - self.batch_output_root = pathlib.Path(cmdopts['batch_output_root']) + self.batch_input_root = pathset.input_root + self.batch_output_root = pathset.output_root self.criteria = criteria self.cmdopts = cmdopts self.logger = logging.getLogger(__name__) @@ -333,7 +334,7 @@ def create(self, generator: BatchExpDefGenerator) -> None: self.logger.debug( "Applying generated scenario+controller changes to exp%s", i) - expi = self.criteria.gen_exp_names(self.cmdopts)[i] + expi = self.criteria.gen_exp_names()[i] exp_output_root = self.batch_output_root / expi exp_input_root = self.batch_input_root / expi diff --git a/sierra/core/generators/exp_generators.py b/sierra/core/generators/exp_generators.py index e35c2bd0..c87e1ff4 100755 --- 
a/sierra/core/generators/exp_generators.py +++ b/sierra/core/generators/exp_generators.py @@ -27,7 +27,7 @@ # Project packages import sierra.core.generators.generator_factory as gf from sierra.core.experiment import spec, definition -from sierra.core import types +from sierra.core import types, batchroot import sierra.core.variables.batch_criteria as bc @@ -41,20 +41,6 @@ class BatchExpDefGenerator: batch_config_template: Absolute path to the root template XML configuration file. - batch_input_root: Root directory for all generated XML input files all - experiments should be stored (relative to current - dir or absolute). Each experiment will get a - directory within this root to store the xml input - files for the set of :term:`Experimental Runs - ` comprising an - :term:`Experiment`; directory name determined by - the batch criteria used. - - batch_output_root: Root directory for all experiment outputs (relative - to current dir or absolute). Each experiment will get - a directory 'exp' in this directory for its - outputs. - criteria: :class:`~sierra.core.variables.batch_criteria.BatchCriteria` derived object instance created from cmdline definition. 
@@ -66,6 +52,7 @@ class BatchExpDefGenerator: def __init__(self, criteria: bc.IConcreteBatchCriteria, + pathset: batchroot.PathSet, controller_name: str, scenario_basename: str, cmdopts: types.Cmdopts) -> None: @@ -77,8 +64,7 @@ def __init__(self, self.exp_template_stem = self.batch_config_template.stem self.batch_config_extension = None - self.batch_input_root = pathlib.Path(cmdopts['batch_input_root']) - self.batch_output_root = pathlib.Path(cmdopts['batch_output_root']) + self.pathset = pathset self.controller_name = controller_name self.scenario_basename = scenario_basename diff --git a/sierra/core/generators/generator_factory.py b/sierra/core/generators/generator_factory.py index 275c4f59..86ecf891 100755 --- a/sierra/core/generators/generator_factory.py +++ b/sierra/core/generators/generator_factory.py @@ -82,15 +82,15 @@ def _do_tag_add(self, # the platform relies on added tags to calculate population sizes, # then this won't work. controllers = config.kYAML.controllers - assert hasattr(self.spec.criteria, 'n_robots'),\ + assert hasattr(self.spec.criteria, 'n_agents'), \ (f"When using __UUID__ and tag_add in {controllers}, the batch " "criteria must implement bc.IQueryableBatchCriteria") - n_robots = self.spec.criteria.n_robots(self.spec.exp_num) + n_agents = self.spec.criteria.n_agents(self.spec.exp_num) - assert n_robots > 0,\ + assert n_agents > 0, \ "Batch criteria {self.spec.criteria} returned 0 robots?" 
- for robot_id in range(0, n_robots): + for robot_id in range(0, n_agents): to_pp = copy.deepcopy(add) pp_add = self._pp_for_tag_add(to_pp, robot_id) exp_def.tag_add(pp_add.path, diff --git a/sierra/core/models/interface.py b/sierra/core/models/interface.py index 01dc3d32..d4364ca5 100755 --- a/sierra/core/models/interface.py +++ b/sierra/core/models/interface.py @@ -16,7 +16,7 @@ # Project packages from sierra.core.variables import batch_criteria as bc -from sierra.core import types +from sierra.core import types, exproot class IConcreteIntraExpModel1D(implements.Interface): @@ -34,7 +34,8 @@ class IConcreteIntraExpModel1D(implements.Interface): def run(self, criteria: bc.IConcreteBatchCriteria, exp_num: int, - cmdopts: types.Cmdopts) -> tp.List[pd.DataFrame]: + cmdopts: types.Cmdopts, + pathset: exproot.PathSet) -> tp.List[pd.DataFrame]: """Run the model and generate a list of dataframes. Each dataframe can (potentially) target different graphs. All dataframes @@ -97,7 +98,8 @@ class IConcreteIntraExpModel2D(implements.Interface): def run(self, criteria: bc.IConcreteBatchCriteria, exp_num: int, - cmdopts: types.Cmdopts) -> tp.List[pd.DataFrame]: + cmdopts: types.Cmdopts, + pathset: exproot.PathSet) -> tp.List[pd.DataFrame]: """Run the model and generate a list of dataframes. Each dataframe can (potentially) target a different graph. Each @@ -150,7 +152,8 @@ class IConcreteInterExpModel1D(implements.Interface): def run(self, criteria: bc.IConcreteBatchCriteria, - cmdopts: types.Cmdopts) -> tp.List[pd.DataFrame]: + cmdopts: types.Cmdopts, + pathset: exproot.PathSet) -> tp.List[pd.DataFrame]: """Run the model and generate list of dataframes. Each dataframe can (potentially) target a different graph. 
Each diff --git a/sierra/core/pipeline/stage1/pipeline_stage1.py b/sierra/core/pipeline/stage1/pipeline_stage1.py index 9b38d783..d69bb1a7 100755 --- a/sierra/core/pipeline/stage1/pipeline_stage1.py +++ b/sierra/core/pipeline/stage1/pipeline_stage1.py @@ -15,7 +15,7 @@ from sierra.core.generators.exp_generators import BatchExpDefGenerator from sierra.core.generators.exp_creator import BatchExpCreator import sierra.core.variables.batch_criteria as bc -from sierra.core import types +from sierra.core import types, batchroot class PipelineStage1: @@ -30,6 +30,7 @@ class PipelineStage1: def __init__(self, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, controller: str, criteria: bc.IConcreteBatchCriteria) -> None: self.generator = BatchExpDefGenerator(controller_name=controller, @@ -37,6 +38,7 @@ def __init__(self, criteria=criteria, cmdopts=cmdopts) self.creator = BatchExpCreator(criteria=criteria, cmdopts=cmdopts) + self.pathset = pathset self.cmdopts = cmdopts self.criteria = criteria @@ -69,7 +71,7 @@ def run(self) -> None: """ self.logger.info("Generating input files for batch experiment in %s...", - self.cmdopts['batch_root']) + self.pathset.root) self.creator.create(self.generator) n_exp_in_batch = len(self.criteria.gen_attr_changelist()) + \ diff --git a/sierra/core/pipeline/stage2/exp_runner.py b/sierra/core/pipeline/stage2/exp_runner.py index 954620c2..eb3b1fa1 100755 --- a/sierra/core/pipeline/stage2/exp_runner.py +++ b/sierra/core/pipeline/stage2/exp_runner.py @@ -165,7 +165,7 @@ def __call__(self) -> None: module.pre_exp_diagnostics(self.cmdopts, self.logger) exp_all = [self.batch_exp_root / d - for d in self.criteria.gen_exp_names(self.cmdopts)] + for d in self.criteria.gen_exp_names()] exp_to_run = utils.exp_range_calc(self.cmdopts, self.batch_exp_root, @@ -180,7 +180,8 @@ def __call__(self) -> None: # Calculate path for to file for logging execution times now = datetime.datetime.now() - exec_times_fpath = self.batch_stat_exec_root / 
now.strftime("%Y-%m-%e-%H:%M") + exec_times_fpath = self.pathset.stat_exec_root / \ + now.strftime("%Y-%m-%e-%H:%M") # Start a new process for the experiment shell so pre-run commands have # an effect (if they set environment variables, etc.). @@ -197,11 +198,12 @@ def __call__(self) -> None: for spec in generator.pre_exp_cmds(): shell.run_from_spec(spec) - runner = ExpRunner(self.cmdopts, + runner = ExpRunner(self.pathset, + self.cmdopts, exec_times_fpath, generator, shell) - runner(exp, exp_num) + runner(exp.name, exp_num) # Run cmds to cleanup platform-specific things now that the experiment # is done (if needed). @@ -219,6 +221,7 @@ class ExpRunner: """ def __init__(self, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, exec_times_fpath: pathlib.Path, generator: platform.ExpShellCmdsGenerator, @@ -228,14 +231,16 @@ def __init__(self, self.shell = shell self.generator = generator self.cmdopts = cmdopts + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, - exp_input_root: pathlib.Path, + exp_name: str, exp_num: int) -> None: """Execute experimental runs for a single experiment. 
""" - + exp_input_root = self.pathset.input_root / exp_name + exp_scratch_root = self.pathset.scratch_root / exp_name self.logger.info("Running exp%s in '%s'", exp_num, exp_input_root) @@ -244,8 +249,7 @@ def __call__(self, wd = exp_input_root.relative_to(pathlib.Path().home()) start = time.time() - scratch_root = self.cmdopts['batch_scratch_root'] / exp_input_root.name - utils.dir_create_checked(scratch_root, exist_ok=True) + utils.dir_create_checked(exp_scratch_root, exist_ok=True) assert self.cmdopts['exec_jobs_per_node'] is not None, \ "# parallel jobs can't be None" @@ -253,7 +257,7 @@ def __call__(self, exec_opts = { 'exp_input_root': str(exp_input_root), 'work_dir': str(wd), - 'scratch_dir': str(scratch_root), + 'scratch_dir': str(exp_scratch_root), 'cmdfile_stem_path': str(exp_input_root / config.kGNUParallel['cmdfile_stem']), 'cmdfile_ext': config.kGNUParallel['cmdfile_ext'], 'exec_resume': self.cmdopts['exec_resume'], diff --git a/sierra/core/pipeline/stage3/collate.py b/sierra/core/pipeline/stage3/collate.py index c517e20e..d7a49468 100755 --- a/sierra/core/pipeline/stage3/collate.py +++ b/sierra/core/pipeline/stage3/collate.py @@ -31,7 +31,7 @@ # Project packages import sierra.core.variables.batch_criteria as bc import sierra.core.plugin_manager as pm -from sierra.core import types, storage, utils, config, pathset +from sierra.core import types, storage, utils, config, batchroot class ExpParallelCollator: @@ -74,7 +74,7 @@ def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None: criteria) for exp in exp_to_proc: - gatherq.put((self.cmdopts['batch_output_root'], exp.name)) + gatherq.put((self.pathset.output_root, exp.name)) self.logger.debug("Starting %d gatherers, method=%s", n_gatherers, diff --git a/sierra/core/pipeline/stage3/pipeline_stage3.py b/sierra/core/pipeline/stage3/pipeline_stage3.py index 965d3928..5e6400b5 100755 --- a/sierra/core/pipeline/stage3/pipeline_stage3.py +++ b/sierra/core/pipeline/stage3/pipeline_stage3.py @@ -100,8 
+100,7 @@ def _run_imagizing(self, intra_HM_config: dict, cmdopts: types.Cmdopts, criteria: bc.IConcreteBatchCriteria): - self.logger.info("Imagizing .csvs in %s...", - cmdopts['batch_output_root']) + self.logger.info("Imagizing .csvs in %s...", self.pathset.output_root) start = time.time() imagize.proc_batch_exp(main_config, cmdopts, intra_HM_config, criteria) elapsed = int(time.time() - start) diff --git a/sierra/core/pipeline/stage4/graphs/collate.py b/sierra/core/pipeline/stage4/graphs/collate.py index 3a8f13b4..c6ea0d8c 100755 --- a/sierra/core/pipeline/stage4/graphs/collate.py +++ b/sierra/core/pipeline/stage4/graphs/collate.py @@ -60,22 +60,23 @@ class UnivarGraphCollator: def __init__(self, main_config: types.YAMLDict, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.main_config = main_config self.cmdopts = cmdopts + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, criteria, - target: dict, - stat_collate_root: pathlib.Path) -> None: + target: dict) -> None: self.logger.info("Univariate files from batch in %s for graph '%s'...", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem']) self.logger.trace(json.dumps(target, indent=4)) # type: ignore exp_dirs = utils.exp_range_calc(self.cmdopts, - self.cmdopts['batch_output_root'], + self.pathset.output_root, criteria) # Always do the mean, even if stats are disabled @@ -98,12 +99,13 @@ def __call__(self, for stat in stats: if stat.all_srcs_exist: writer(stat.df, - stat_collate_root / (target['dest_stem'] + stat.df_ext), + self.pathset.stat_collate_root / + (target['dest_stem'] + stat.df_ext), index=False) elif not stat.all_srcs_exist and stat.some_srcs_exist: self.logger.warning("Not all experiments in '%s' produced '%s%s'", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem'], stat.df_ext) @@ -144,22 +146,23 @@ class BivarGraphCollator: def __init__(self, 
main_config: types.YAMLDict, - cmdopts: types.Cmdopts) -> None: + cmdopts: types.Cmdopts, + pathset: batchroot.PathSet) -> None: self.main_config = main_config self.cmdopts = cmdopts + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, criteria: bc.IConcreteBatchCriteria, - target: dict, - stat_collate_root: pathlib.Path) -> None: + target: dict) -> None: self.logger.info("Bivariate files from batch in %s for graph '%s'...", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem']) self.logger.trace(json.dumps(target, indent=4)) # type: ignore exp_dirs = utils.exp_range_calc(self.cmdopts, - self.cmdopts['batch_output_root'], + self.pathset.output_root, criteria) xlabels, ylabels = utils.bivar_exp_labels_calc(exp_dirs) @@ -189,7 +192,7 @@ def __call__(self, row, stat.df_ext) writer(df, - stat_collate_root / name, + self.pathset.stat_collate_root / name, index=False) # TODO: Don't write this for now, until I find a better way of @@ -200,7 +203,7 @@ def __call__(self, elif stat.some_srcs_exist: self.logger.warning("Not all experiments in '%s' produced '%s%s'", - self.cmdopts['batch_output_root'], + self.pathset.output_root, target['src_stem'], stat.df_ext) diff --git a/sierra/core/pipeline/stage4/graphs/inter/generate.py b/sierra/core/pipeline/stage4/graphs/inter/generate.py index 77f64031..e6e33a5b 100755 --- a/sierra/core/pipeline/stage4/graphs/inter/generate.py +++ b/sierra/core/pipeline/stage4/graphs/inter/generate.py @@ -14,12 +14,13 @@ # Project packages from sierra.core.variables import batch_criteria as bc -from sierra.core import types, utils +from sierra.core import types, utils, batchroot from sierra.core.pipeline.stage4.graphs.inter import line, heatmap def generate(main_config: types.YAMLDict, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, LN_targets: tp.List[types.YAMLDict], HM_targets: tp.List[types.YAMLDict], @@ -58,8 +59,7 @@ def generate(main_config: types.YAMLDict, to get logging 
messages have unique logger names between this class and your derived class , in order to reduce confusion. """ - utils.dir_create_checked(cmdopts['batch_graph_collate_root'], - exist_ok=True) + utils.dir_create_checked(pathset.graph_collate_root, exist_ok=True) if criteria.is_univar(): if not cmdopts['project_no_LN']: diff --git a/sierra/core/pipeline/stage4/graphs/inter/heatmap.py b/sierra/core/pipeline/stage4/graphs/inter/heatmap.py index 8ecc1afb..3b99083b 100644 --- a/sierra/core/pipeline/stage4/graphs/inter/heatmap.py +++ b/sierra/core/pipeline/stage4/graphs/inter/heatmap.py @@ -15,7 +15,7 @@ import json # Project packages -from sierra.core import types, config, +from sierra.core import types, config, batchroot from sierra.core.variables import batch_criteria as bc from sierra.core.graphs.heatmap import Heatmap diff --git a/sierra/core/pipeline/stage4/graphs/inter/line.py b/sierra/core/pipeline/stage4/graphs/inter/line.py index 3bab1c25..18af1ab0 100644 --- a/sierra/core/pipeline/stage4/graphs/inter/line.py +++ b/sierra/core/pipeline/stage4/graphs/inter/line.py @@ -44,14 +44,12 @@ def generate( _gen_summary_linegraph(graph, pathset, cmdopts, - criteria, - pathset.graph_collate_root) + criteria) else: _gen_stacked_linegraph(graph, - pathset.stat_collate_root, + pathset, cmdopts, - criteria, - pathset.graph_collate_root) + criteria) def _gen_summary_linegraph(graph: types.YAMLDict, diff --git a/sierra/core/pipeline/stage4/graphs/intra/generate.py b/sierra/core/pipeline/stage4/graphs/intra/generate.py index 035beb6c..8b75f896 100755 --- a/sierra/core/pipeline/stage4/graphs/intra/generate.py +++ b/sierra/core/pipeline/stage4/graphs/intra/generate.py @@ -19,7 +19,7 @@ import sierra.core.variables.batch_criteria as bc import sierra.core.plugin_manager as pm -from sierra.core import types, utils +from sierra.core import types, utils, batchroot, exproot from sierra.core.pipeline.stage4.graphs.intra import line, heatmap _logger = logging.getLogger(__name__) @@ -27,6 
+27,7 @@ def generate(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, controller_config: types.YAMLDict, LN_config: types.YAMLDict, HM_config: types.YAMLDict, @@ -51,33 +52,28 @@ def generate(main_config: types.YAMLDict, experiment. """ exp_to_gen = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + pathset.output_root, criteria) + if not exp_to_gen: + return + + module = pm.module_load_tiered(project=cmdopts['project'], + path='pipeline.stage4.graphs.intra.generate') + + generator = module.IntraExpGraphGenerator(main_config, + controller_config, + LN_config, + HM_config, + cmdopts) for exp in exp_to_gen: - batch_output_root = pathlib.Path(cmdopts["batch_output_root"]) - batch_stat_root = pathlib.Path(cmdopts["batch_stat_root"]) - batch_input_root = pathlib.Path(cmdopts["batch_input_root"]) - batch_graph_root = pathlib.Path(cmdopts["batch_graph_root"]) - batch_model_root = pathlib.Path(cmdopts["batch_model_root"]) - - cmdopts = copy.deepcopy(cmdopts) - cmdopts["exp_input_root"] = str(batch_input_root / exp.name) - cmdopts["exp_output_root"] = str(batch_output_root / exp.name) - cmdopts["exp_graph_root"] = str(batch_graph_root / exp.name) - cmdopts["exp_model_root"] = str(batch_model_root / exp.name) - cmdopts["exp_stat_root"] = str(batch_stat_root / exp.name) - - if os.path.isdir(cmdopts["exp_stat_root"]): - generator = pm.module_load_tiered(project=cmdopts['project'], - path='pipeline.stage4.graphs.intra.generate') - generator.IntraExpGraphGenerator(main_config, - controller_config, - LN_config, - HM_config, - cmdopts)(criteria) + exproots = exproot.PathSet(pathset, exp.name) + + if os.path.isdir(exproots.stat_root): + generator(exproots, criteria) else: - _logger.warning("Skipping experiment '%s': %s does not exist", + _logger.warning("Skipping experiment '%s': % s does not exist, or " + "isn't a directory", exp, cmdopts['exp_stat_root']) @@ -132,9 +128,9 @@ def __init__(self, self.controller_config = 
controller_config self.logger = logging.getLogger(__name__) - utils.dir_create_checked(self.cmdopts["exp_graph_root"], exist_ok=True) - - def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None: + def __call__(self, + pathset: exproot.PathSet, + criteria: bc.IConcreteBatchCriteria) -> None: """ Generate graphs. @@ -146,6 +142,8 @@ def __call__(self, criteria: bc.IConcreteBatchCriteria) -> None: #. :func:`~sierra.core.pipeline.stage4.graphs.intra.heatmap.generate()` to generate heatmaps for each experiment in the batch. """ + utils.dir_create_checked(self.pathset.graph_root, exist_ok=True) + LN_targets, HM_targets = self.calc_targets() if not self.cmdopts['project_no_LN']: diff --git a/sierra/core/pipeline/stage4/model_runner.py b/sierra/core/pipeline/stage4/model_runner.py index 7bbee84d..9628c0c5 100755 --- a/sierra/core/pipeline/stage4/model_runner.py +++ b/sierra/core/pipeline/stage4/model_runner.py @@ -15,7 +15,7 @@ # Project packages import sierra.core.variables.batch_criteria as bc -from sierra.core import models, types, utils, storage, config +from sierra.core import models, types, utils, storage, config, batchroot, exproot class IntraExpModelRunner: @@ -25,19 +25,21 @@ class IntraExpModelRunner: def __init__(self, cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, to_run: tp.List[tp.Union[models.interface.IConcreteIntraExpModel1D, models.interface.IConcreteIntraExpModel2D]]) -> None: self.cmdopts = cmdopts self.models = to_run + self.pathset = pathset self.logger = logging.getLogger(__name__) def __call__(self, main_config: types.YAMLDict, criteria: bc.IConcreteBatchCriteria) -> None: exp_to_run = utils.exp_range_calc(self.cmdopts, - self.cmdopts['batch_output_root'], + self.pathset.output_root, criteria) - exp_dirnames = criteria.gen_exp_names(self.cmdopts) + exp_dirnames = criteria.gen_exp_names() for exp in exp_to_run: self._run_models_in_exp(criteria, exp_dirnames, exp) @@ -48,30 +50,17 @@ def _run_models_in_exp(self, exp: pathlib.Path) -> 
None: exp_index = exp_dirnames.index(exp) - cmdopts = copy.deepcopy(self.cmdopts) - batch_output_root = pathlib.Path(self.cmdopts["batch_output_root"]) - batch_stat_root = pathlib.Path(self.cmdopts["batch_stat_root"]) - batch_input_root = pathlib.Path(self.cmdopts["batch_input_root"]) - batch_graph_root = pathlib.Path(self.cmdopts["batch_graph_root"]) - batch_model_root = pathlib.Path(self.cmdopts["batch_model_root"]) - - cmdopts["exp0_output_root"] = str(batch_output_root / exp_dirnames[0].name) - cmdopts["exp0_stat_root"] = str(batch_stat_root / exp_dirnames[0].name) - - cmdopts["exp_input_root"] = str(batch_input_root / exp.name) - cmdopts["exp_output_root"] = str(batch_output_root / exp.name) - cmdopts["exp_graph_root"] = str(batch_graph_root / exp.name) - cmdopts["exp_stat_root"] = str(batch_stat_root / exp.name) - cmdopts["exp_model_root"] = str(batch_model_root / exp.name) + exproots = exproot.PathSet(self.pathset, exp.name, exp_dirnames[0].name) - utils.dir_create_checked(cmdopts['exp_model_root'], exist_ok=True) + utils.dir_create_checked(exproots.model_root, exist_ok=True) for model in self.models: - self._run_model_in_exp(criteria, cmdopts, exp_index, model) + self._run_model_in_exp(criteria, cmdopts, exproots, exp_index, model) def _run_model_in_exp(self, criteria: bc.IConcreteBatchCriteria, cmdopts: types.Cmdopts, + pathset: exproot.PathSet, exp_index: int, model: tp.Union[models.interface.IConcreteIntraExpModel1D, models.interface.IConcreteIntraExpModel2D]) -> None: @@ -85,11 +74,11 @@ def _run_model_in_exp(self, self.logger.debug("Run intra-experiment model '%s' for exp%s", str(model), exp_index) - dfs = model.run(criteria, exp_index, cmdopts) + dfs = model.run(criteria, exp_index, cmdopts, pathset) writer = storage.DataFrameWriter('storage.csv') for df, csv_stem in zip(dfs, model.target_csv_stems()): - path_stem = pathlib.Path(cmdopts['exp_model_root']) / csv_stem + path_stem = pathset.model_root / csv_stem # Write model legend file so the generated 
graph can find it with utils.utf8open(path_stem.with_suffix(config.kModelsExt['legend']), @@ -112,8 +101,10 @@ class InterExpModelRunner: """ def __init__(self, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, to_run: tp.List[models.interface.IConcreteInterExpModel1D]) -> None: + self.pathset = pathset self.cmdopts = cmdopts self.models = to_run self.logger = logging.getLogger(__name__) @@ -124,10 +115,8 @@ def __call__(self, cmdopts = copy.deepcopy(self.cmdopts) - utils.dir_create_checked( - cmdopts['batch_model_root'], exist_ok=True) - utils.dir_create_checked( - cmdopts['batch_graph_collate_root'], exist_ok=True) + utils.dir_create_checked(self.pathset.model_root, exist_ok=True) + utils.dir_create_checked(self.pathset.graph_collate_root, exist_ok=True) for model in self.models: if not model.run_for_batch(criteria, cmdopts): @@ -138,10 +127,10 @@ def __call__(self, # Run the model self.logger.debug("Run inter-experiment model '%s'", str(model)) - dfs = model.run(criteria, cmdopts) + dfs = model.run(criteria, cmdopts, self.pathset) for df, csv_stem in zip(dfs, model.target_csv_stems()): - path_stem = pathlib.Path(cmdopts['batch_model_root']) / csv_stem + path_stem = self.model_root / csv_stem # Write model .csv file writer = storage.DataFrameWriter('storage.csv') diff --git a/sierra/core/pipeline/stage4/render.py b/sierra/core/pipeline/stage4/render.py index a9e870b1..f212100a 100755 --- a/sierra/core/pipeline/stage4/render.py +++ b/sierra/core/pipeline/stage4/render.py @@ -29,7 +29,7 @@ # Project packages import sierra.core.variables.batch_criteria as bc -from sierra.core import types, config, utils +from sierra.core import types, config, utils, batchroot _logger = logging.getLogger(__name__) @@ -118,6 +118,7 @@ def _worker(q: mp.Queue, main_config: types.YAMLDict) -> None: def from_platform(main_config: types.YAMLDict, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, criteria: bc.IConcreteBatchCriteria) -> None: """Render frames (images) captured in by 
a platform into videos. @@ -139,12 +140,12 @@ def from_platform(main_config: types.YAMLDict, .. note:: This currently only works with PNG images. """ exp_to_render = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + pathset.output_root, criteria) inputs = [] for exp in exp_to_render: - output_dir = pathlib.Path(cmdopts['batch_video_root'], exp.name) + output_dir = pathset.video_root / exp.name for run in exp.iterdir(): platform = cmdopts['platform'].split('.')[1] @@ -161,6 +162,7 @@ def from_platform(main_config: types.YAMLDict, def from_project_imagized(main_config: types.YAMLDict, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, criteria: bc.IConcreteBatchCriteria) -> None: """Render THINGS previously imagized in a project in stage 3 into videos. @@ -182,17 +184,16 @@ def from_project_imagized(main_config: types.YAMLDict, .. note:: This currently only works with PNG images. """ exp_to_render = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + pathset.output_root, criteria) inputs = [] for exp in exp_to_render: - exp_imagize_root = pathlib.Path(cmdopts['batch_imagize_root'], - exp.name) + exp_imagize_root = pathset.imagize_root / exp.name if not exp_imagize_root.exists(): continue - output_dir = pathlib.Path(cmdopts['batch_video_root'], exp.name) + output_dir = pathset.videoroot / exp.name for candidate in exp_imagize_root.iterdir(): if candidate.is_dir(): @@ -208,6 +209,7 @@ def from_project_imagized(main_config: types.YAMLDict, def from_bivar_heatmaps(main_config: types.YAMLDict, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, criteria: bc.IConcreteBatchCriteria) -> None: """Render inter-experiment heatmaps into videos. 
@@ -227,14 +229,12 @@ def from_bivar_heatmaps(main_config: types.YAMLDict, versionadded:: 1.2.20 """ - graph_root = pathlib.Path(cmdopts['batch_graph_collate_root']) inputs = [] - for candidate in graph_root.iterdir(): + for candidate in pathset.graph_collate_root.iterdir(): if "HM-" in candidate.name and candidate.is_dir(): - output_dir = pathlib.Path(cmdopts['batch_video_root'], - candidate.name) + output_dir = pathset.video_root / candidate.name opts = { 'input_dir': str(candidate), diff --git a/sierra/core/pipeline/stage5/inter_scenario_comparator.py b/sierra/core/pipeline/stage5/inter_scenario_comparator.py index a933cc04..87153a1d 100755 --- a/sierra/core/pipeline/stage5/inter_scenario_comparator.py +++ b/sierra/core/pipeline/stage5/inter_scenario_comparator.py @@ -25,7 +25,7 @@ from sierra.core.variables import batch_criteria as bc import sierra.core.plugin_manager as pm from sierra.core import types, utils, config, storage, batchroot -from sierra.core.pip.line.stage5 import outputroot +from sierra.core.pipeline.stage5 import outputroot class UnivarInterScenarioComparator: @@ -153,7 +153,7 @@ def _compare_across_scenarios(self, self.scenarios[0]) self._gen_csvs(pathset=pathset, - proect=self.cli_args.project, + project=self.cli_args.project, batch_leaf=batch_leaf, src_stem=graph['src_stem'], dest_stem=graph['dest_stem']) @@ -231,9 +231,7 @@ def _gen_csvs(self, """ - csv_ipath_stem = pathlib.Path(pathset.output_root, - pathset.stat_collate_root, - src_stem) + csv_ipath_stem = pathset.stat_collate_root / src_stem # Some experiments might not generate the necessary performance measure # CSVs for graph generation, which is OK. @@ -269,8 +267,8 @@ def _gen_csvs(self, # Can't use with_suffix() for opath, because that path contains the # controller, which already has a '.' in it. 
model_istem = pathlib.Path(pathset.model_root, src_stem) - model_ostem = pathlib.Path(self.stage5_roots.model_root, - dest_stem + "-" + self.controller) + model_ostem = self.stage5_roots.model_root / \ + (dest_stem + "-" + self.controller) model_ipath = model_istem.with_suffix(config.kModelsExt['model']) model_opath = model_ostem.with_name( diff --git a/sierra/core/pipeline/stage5/intra_scenario_comparator.py b/sierra/core/pipeline/stage5/intra_scenario_comparator.py index 7a93c4e0..5cc3df91 100755 --- a/sierra/core/pipeline/stage5/intra_scenario_comparator.py +++ b/sierra/core/pipeline/stage5/intra_scenario_comparator.py @@ -26,7 +26,7 @@ from sierra.core.graphs.heatmap import Heatmap, DualHeatmap from sierra.core.variables import batch_criteria as bc from sierra.core import types, utils, config, storage, batchroot -from sierra.core.pipeline.stage5 import leafcalc, preprocess +from sierra.core.pipeline.stage5 import leafcalc, preprocess, outputroot class UnivarIntraScenarioComparator: @@ -42,11 +42,7 @@ class UnivarIntraScenarioComparator: controllers: List of controller names to compare. - cc_csv_root: Absolute directory path to the location controller CSV - files should be output to. - - cc_graph_root: Absolute directory path to the location the generated - graphs should be output to. + stage5_roots: Set of directory paths for stage 5 file generation. cmdopts: Dictionary of parsed cmdline parameters. 
@@ -63,15 +59,14 @@ class UnivarIntraScenarioComparator: def __init__(self, controllers: tp.List[str], - pathset: batchroot.PathSet, - cc_csv_root: pathlib.Path, - cc_graph_root: pathlib.Path, + batch_roots: batchroot.PathSet, + stage5_roots: outputroot.PathSet, cmdopts: types.Cmdopts, cli_args, main_config: types.YAMLDict) -> None: self.controllers = controllers - self.cc_graph_root = cc_graph_root - self.cc_csv_root = cc_csv_root + self.batch_roots = batch_roots + self.stage5_roots = stage5_roots self.cmdopts = cmdopts self.cli_args = cli_args @@ -141,22 +136,22 @@ def _compare_in_scenario(self, # experiment (which # lives inside of the scenario dir), because # they are all different. We need generate these paths for EACH # controller, because the controller is part of the batch root path. - paths = batchroot.from_exp(sierra_rpath=self.cli_args.sierra_root, - project=self.cli_args.project, - batch_leaf=batch_leaf, - controller=controller) - cmdopts.update(paths) + pathset = batchroot.from_exp(sierra_root=self.cli_args.sierra_root, + project=self.cli_args.project, + batch_leaf=batch_leaf, + controller=controller) # For each scenario, we have to create the batch criteria for it, # because they are all different. 
- criteria = bc.factory(self.main_config, cmdopts, + pathset, self.cli_args, batch_leaf.scenario) self._gen_csv(batch_leaf=batch_leaf.to_path(), criteria=criteria, + pathset=pathset, cmdopts=cmdopts, controller=controller, src_stem=graph['src_stem'], @@ -175,7 +170,7 @@ def _compare_in_scenario(self, def _gen_csv(self, batch_leaf: str, criteria: bc.IConcreteBatchCriteria, - cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, controller: str, src_stem: str, dest_stem: str, @@ -187,8 +182,8 @@ def _gen_csv(self, """ self.logger.debug("Gathering data for '%s' from %s -> %s", controller, src_stem, dest_stem) - ipath = pathlib.Path(cmdopts['batch_stat_collate_root'], - src_stem + config.kStats['mean'].exts['mean']) + ipath = pathset.stat_collate_root / \ + (src_stem + config.kStats['mean'].exts['mean']) # Some experiments might not generate the necessary performance measure # .csvs for graph generation, which is OK. @@ -198,9 +193,9 @@ def _gen_csv(self, controller) return - preparer = preprocess.IntraExpPreparer(ipath_stem=cmdopts['batch_stat_collate_root'], + preparer = preprocess.IntraExpPreparer(ipath_stem=pathset.stat_collate_root, ipath_leaf=src_stem, - opath_stem=self.cc_csv_root, + opath_stem=self.stage5_roots.csv_root, n_exp=criteria.n_exp()) opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) preparer.across_rows(opath_leaf=opath_leaf, index=0, inc_exps=inc_exps) @@ -227,9 +222,9 @@ def _gen_graph(self, xticks = utils.exp_include_filter( inc_exps, xticks, criteria.n_exp()) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) - SummaryLineGraph(stats_root=self.cc_csv_root, + SummaryLineGraph(stats_root=self.stage5_roots.csv_root, input_stem=opath_leaf, output_fpath=opath, stats=cmdopts['dist_stats'], @@ -256,12 +251,6 @@ class BivarIntraScenarioComparator: controllers: List of controller names to compare. 
- cc_csv_root: Absolute directory path to the location controller CSV - files should be output to. - - cc_graph_root: Absolute directory path to the location the generated - graphs should be output to. - cmdopts: Dictionary of parsed cmdline parameters. cli_args: :class:`argparse` object containing the cmdline @@ -276,21 +265,19 @@ class BivarIntraScenarioComparator: def __init__(self, controllers: tp.List[str], - cc_csv_root: pathlib.Path, - cc_graph_root: pathlib.Path, + stage5_roots: outputroot.PathSet, cmdopts: types.Cmdopts, cli_args: argparse.Namespace, main_config: types.YAMLDict) -> None: self.controllers = controllers - self.cc_csv_root = cc_csv_root - self.cc_graph_root = cc_graph_root + self.stage5_roots = stage5_roots self.cmdopts = cmdopts self.cli_args = cli_args self.main_config = main_config self.logger = logging.getLogger(__name__) - self.logger.debug("csv_root=%s", str(self.cc_csv_root)) - self.logger.debug("graph_root=%s", str(self.cc_graph_root)) + self.logger.debug("csv_root=%s", str(self.stage5_roots.csv_root)) + self.logger.debug("graph_root=%s", str(self.stage5_roots.graph_root)) self.project_root = pathlib.Path(self.cmdopts['sierra_root'], self.cmdopts['project']) @@ -424,7 +411,7 @@ def _compare_in_scenario(self, comp_type=comp_type) def _gen_csvs_for_2D_or_3D(self, - cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, batch_leaf: str, controller: str, src_stem: str, @@ -434,7 +421,7 @@ def _gen_csvs_for_2D_or_3D(self, 1 CSV per controller, for 2D/3D comparison types only. Because each CSV file corresponding to performance measures are 2D arrays, we actually just copy and rename the performance measure CSV files for each - controllers into :attr:`cc_csv_root`. + controllers into :attr:`stage5_roots.csv_root`. 
:class:`~sierra.core.graphs.stacked_surface_graph.StackedSurfaceGraph` expects an ``_[0-9]+.csv`` pattern for each 2D surfaces to graph in @@ -449,8 +436,8 @@ self.logger.debug("Gathering data for '%s' from %s -> %s", controller, src_stem, dest_stem) - csv_ipath = pathlib.Path(cmdopts['batch_stat_collate_root'], - src_stem + config.kStats['mean'].exts['mean']) + csv_ipath = pathset.stat_collate_root / \ + (src_stem + config.kStats['mean'].exts['mean']) # Some experiments might not generate the necessary performance measure # .csvs for graph generation, which is OK. @@ -466,7 +453,7 @@ dest_stem, [self.controllers.index(controller)]) - opath_stem = self.cc_csv_root / opath_leaf + opath_stem = self.stage5_roots.csv_root / opath_leaf opath = opath_stem.with_name( opath_stem.name + config.kStats['mean'].exts['mean']) writer = storage.DataFrameWriter('storage.csv') @@ -474,7 +461,8 @@ def _gen_csvs_for_1D(self, cmdopts: types.Cmdopts, - criteria: bc.IConcreteBatchCriteria, + pathset: batchroot.PathSet, + criteria: bc.BivarBatchCriteria, batch_leaf: str, controller: str, src_stem: str, @@ -492,8 +480,8 @@ self.logger.debug("Gathering data for '%s' from %s -> %s", controller, src_stem, dest_stem) - csv_ipath = pathlib.Path(cmdopts['batch_stat_collate_root'], - src_stem + config.kStats['mean'].exts['mean']) + csv_ipath = pathset.stat_collate_root / \ + (src_stem + config.kStats['mean'].exts['mean']) # Some experiments might not generate the necessary performance measure # .csvs for graph generation, which is OK.
@@ -509,14 +497,14 @@ def _gen_csvs_for_1D(self, "generation: no stats will be included")) if primary_axis == 0: - preparer = preprocess.IntraExpPreparer(ipath_stem=cmdopts['batch_stat_collate_root'], + preparer = preprocess.IntraExpPreparer(ipath_stem=pathset.stat_collate_root, ipath_leaf=src_stem, - opath_stem=self.cc_csv_root, + opath_stem=self.stage5_roots.csv_root, n_exp=criteria.criteria2.n_exp()) reader = storage.DataFrameReader('storage.csv') - ipath = pathlib.Path(cmdopts['batch_stat_collate_root'], - src_stem + config.kStats['mean'].exts['mean']) + ipath = pathset.stat_collate_root / \ + (src_stem + config.kStats['mean'].exts['mean']) n_rows = len(reader(ipath).index) for i in range(0, n_rows): @@ -527,12 +515,12 @@ def _gen_csvs_for_1D(self, index=i, inc_exps=inc_exps) else: - preparer = preprocess.IntraExpPreparer(ipath_stem=cmdopts['batch_stat_collate_root'], + preparer = preprocess.IntraExpPreparer(ipath_stem=pathset.stat_collate_root, ipath_leaf=src_stem, - opath_stem=self.cc_csv_root, + opath_stem=self.stage5_roots.csv_root, n_exp=criteria.criteria1.n_exp()) - exp_dirs = criteria.gen_exp_names(cmdopts) + exp_dirs = criteria.gen_exp_names() xlabels, ylabels = utils.bivar_exp_labels_calc(exp_dirs) xlabels = utils.exp_include_filter( inc_exps, xlabels, criteria.criteria1.n_exp()) @@ -549,6 +537,7 @@ def _gen_csvs_for_1D(self, def _gen_graphs1D(self, batch_leaf: str, criteria: bc.BivarBatchCriteria, + pathset: batchroot.PathSet, cmdopts: types.Cmdopts, dest_stem: str, title: str, @@ -557,14 +546,15 @@ def _gen_graphs1D(self, inc_exps: tp.Optional[str], legend: tp.List[str]) -> None: oleaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - csv_stem_root = self.cc_csv_root / oleaf + csv_stem_root = self.stage5_roots.csv_root / oleaf pattern = str(csv_stem_root) + '*' + config.kStats['mean'].exts['mean'] paths = [f for f in glob.glob(pattern) if re.search('_[0-9]+', f)] for i in range(0, len(paths)): opath_leaf = leafcalc.from_batch_leaf( 
batch_leaf, dest_stem, [i]) - img_opath = self.cc_graph_root / (opath_leaf + config.kImageExt) + img_opath = self.stage5_roots.graph_root / \ + (opath_leaf + config.kImageExt) if primary_axis == 0: n_exp = criteria.criteria1.n_exp() @@ -590,11 +580,11 @@ def _gen_graphs1D(self, xlabel = criteria.graph_xlabel(cmdopts) # TODO: Fix no statistics support for these graphs - SummaryLineGraph(stats_root=self.cc_csv_root, + SummaryLineGraph(stats_root=self.stage5_roots.csv_root, input_stem=opath_leaf, stats='none', output_fpath=img_opath, - model_root=cmdopts['batch_model_root'], + model_root=pathset.model_root, title=title, xlabel=xlabel, ylabel=label, @@ -643,20 +633,20 @@ def _gen_paired_heatmaps(self, Uses a configured controller of primary interest against all other controllers (one graph per pairing), after input files have been - gathered from each controller into :attr:`cc_csv_root`. + gathered from each controller into :attr:`stage5_roots.csv_root`. """ opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) - pattern = self.cc_csv_root / (opath_leaf + '*' + - config.kStats['mean'].exts['mean']) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) + pattern = self.stage5_roots.csv_root / (opath_leaf + '*' + + config.kStats['mean'].exts['mean']) paths = [pathlib.Path(f) for f in glob.glob(str(pattern)) if re.search(r'_[0-9]+\.', f)] self.logger.debug("Generating paired heatmaps in %s -> %s", pattern, - [str(f.relative_to(self.cc_csv_root)) for f in paths]) + [str(f.relative_to(self.stage5_roots.csv_root)) for f in paths]) if len(paths) < 2: self.logger.warning(("Not enough matches from pattern='%s'--" @@ -680,8 +670,9 @@ def _gen_paired_heatmaps(self, leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, [0, i]) + '_paired' - ipath = self.cc_csv_root / (leaf + config.kStats['mean'].exts['mean']) - opath = self.cc_graph_root / (leaf + config.kImageExt) + ipath = 
self.stage5_roots.csv_root / \ + (leaf + config.kStats['mean'].exts['mean']) + opath = self.stage5_roots.graph_root / (leaf + config.kImageExt) writer = storage.DataFrameWriter('storage.csv') writer(plot_df, ipath, index=False) @@ -709,21 +700,21 @@ def _gen_dual_heatmaps(self, Graphs contain all pairings of (primary controller, other), one per graph, within the specified scenario after input files have been - gathered from each controller into :attr:`cc_csv_root`. Only valid if + gathered from each controller into :attr:`stage5_roots.csv_root`. Only valid if the comparison type is ``HMraw``. """ opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) - pattern = self.cc_csv_root / (opath_leaf + '*' + - config.kStats['mean'].exts['mean']) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) + pattern = self.stage5_roots.csv_root / (opath_leaf + '*' + + config.kStats['mean'].exts['mean']) paths = [pathlib.Path(f) for f in glob.glob(str(pattern)) if re.search('_[0-9]+', f)] self.logger.debug("Generating dual heatmaps in %s -> %s", pattern, - [str(f.relative_to(self.cc_csv_root)) for f in paths]) + [str(f.relative_to(self.stage5_roots.csv_root)) for f in paths]) DualHeatmap(ipaths=paths, output_fpath=opath, @@ -749,20 +740,20 @@ def _gen_graph3D(self, Graph contains the specified controllers within thes pecified scenario after input files have been gathered from each controllers into - :attr:`cc_csv_root`. + :attr:`stage5_roots.csv_root`. 
""" opath_leaf = leafcalc.from_batch_leaf(batch_leaf, dest_stem, None) - opath = self.cc_graph_root / (opath_leaf + config.kImageExt) - pattern = self.cc_csv_root / (opath_leaf + '*' + - config.kStats['mean'].exts['mean']) + opath = self.stage5_roots.graph_root / (opath_leaf + config.kImageExt) + pattern = self.stage5_roots.csv_root / (opath_leaf + '*' + + config.kStats['mean'].exts['mean']) paths = [pathlib.Path(f) for f in glob.glob( str(pattern)) if re.search('_[0-9]+', f)] self.logger.debug("Generating stacked surface graphs in %s -> %s", pattern, - [str(f.relative_to(self.cc_csv_root)) for f in paths]) + [str(f.relative_to(self.stage5_roots.csv_root)) for f in paths]) StackedSurfaceGraph(ipaths=paths, output_fpath=opath, diff --git a/sierra/core/platform.py b/sierra/core/platform.py index 60f044f9..2cd70bec 100644 --- a/sierra/core/platform.py +++ b/sierra/core/platform.py @@ -61,7 +61,7 @@ class ExpRunShellCmdsGenerator(): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts self.criteria = criteria @@ -70,7 +70,7 @@ def __init__(self, if hasattr(module, 'ExpRunShellCmdsGenerator'): self.platform = module.ExpRunShellCmdsGenerator(self.cmdopts, self.criteria, - n_robots, + n_agents, exp_num) else: self.platform = None @@ -79,7 +79,7 @@ def __init__(self, if hasattr(module, 'ExpRunShellCmdsGenerator'): self.env = module.ExpRunShellCmdsGenerator(self.cmdopts, self.criteria, - n_robots, + n_agents, exp_num) else: self.env = None diff --git a/sierra/core/ros1/callbacks.py b/sierra/core/ros1/callbacks.py index f0461f59..38ceaf41 100644 --- a/sierra/core/ros1/callbacks.py +++ b/sierra/core/ros1/callbacks.py @@ -21,7 +21,7 @@ def population_size_from_pickle(adds_def: tp.Union[xml.AttrChangeSet, main_config: types.YAMLDict, cmdopts: types.Cmdopts) -> int: for add in adds_def: - if 'name' in add.attr and 'n_robots' in add.attr['name']: + if 'name' in add.attr and 
'n_agents' in add.attr['name']: return int(add.attr['value']) return 0 diff --git a/sierra/core/utils.py b/sierra/core/utils.py index 6d536554..a5903b0c 100755 --- a/sierra/core/utils.py +++ b/sierra/core/utils.py @@ -179,17 +179,15 @@ def get_primary_axis(criteria, return 1 -def exp_range_calc(cmdopts: types.Cmdopts, +def exp_range_calc(exp_range: str, root_dir: pathlib.Path, criteria) -> types.PathList: """ Get the range of experiments to run/do stuff with. SUPER USEFUL. """ - exp_all = [root_dir / d for d in criteria.gen_exp_names(cmdopts)] + exp_all = [root_dir / d for d in criteria.gen_exp_names()] - exp_range = cmdopts['exp_range'] - - if cmdopts['exp_range'] is not None: + if exp_range is not None: min_exp = int(exp_range.split(':')[0]) max_exp = int(exp_range.split(':')[1]) assert min_exp <= max_exp, \ @@ -319,7 +317,7 @@ def exp_template_path(cmdopts: types.Cmdopts, return batch_input_root / dirname / template.stem -def get_n_robots(main_config: types.YAMLDict, +def get_n_agents(main_config: types.YAMLDict, cmdopts: types.Cmdopts, exp_input_root: pathlib.Path, exp_def: definition.XMLExpDef) -> int: @@ -336,18 +334,18 @@ def get_n_robots(main_config: types.YAMLDict, # # 2. Getting it from the pickled experiment definition (i.e., from the # batch criteria which was used for this experiment). 
- n_robots = module.population_size_from_def(exp_def, + n_agents = module.population_size_from_def(exp_def, main_config, cmdopts) - if n_robots <= 0: + if n_agents <= 0: pkl_def = definition.unpickle(exp_input_root / config.kPickleLeaf) - n_robots = module.population_size_from_pickle(pkl_def, + n_agents = module.population_size_from_pickle(pkl_def, main_config, cmdopts) - assert n_robots > 0, "n_robots must be > 0" + assert n_agents > 0, "n_agents must be > 0" - return n_robots + return n_agents def df_fill(df: pd.DataFrame, policy: str) -> pd.DataFrame: @@ -413,7 +411,7 @@ def sphinx_ref(ref: str) -> str: 'apply_to_expdef', 'pickle_modifications', 'exp_template_path', - 'get_n_robots', + 'get_n_agents', 'df_fill', 'utf8open', ] diff --git a/sierra/core/variables/batch_criteria.py b/sierra/core/variables/batch_criteria.py index f9cc03ae..37f5fa0b 100755 --- a/sierra/core/variables/batch_criteria.py +++ b/sierra/core/variables/batch_criteria.py @@ -31,9 +31,9 @@ class IQueryableBatchCriteria(implements.Interface): """ - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: """ - Return the # of robots used for a given :term:`Experiment`. + Return the # of agents used for a given :term:`Experiment`. """ raise NotImplementedError @@ -208,8 +208,8 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: def gen_files(self) -> None: pass - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: - """ + def gen_exp_names(self) -> tp.List[str]: + """ Generate list of experiment names from the criteria. 
Used for creating unique directory names for each experiment in the @@ -247,7 +247,7 @@ def pickle_exp_defs(self, cmdopts: types.Cmdopts) -> None: scaffold_spec = spec.scaffold_spec_factory(self) for exp in range(0, scaffold_spec.n_exps): - exp_dirname = self.gen_exp_names(cmdopts)[exp] + exp_dirname = self.gen_exp_names()[exp] # Pickling of batch criteria experiment definitions is the FIRST set # of changes to be pickled--all other changes come after. We append # to the pickle file by default, which allows any number of @@ -307,7 +307,7 @@ def _scaffold_expi(self, is_compound: bool, i: int, cmdopts: types.Cmdopts) -> None: - exp_dirname = self.gen_exp_names(cmdopts)[i] + exp_dirname = self.gen_exp_names()[i] exp_input_root = self.batch_input_root / exp_dirname utils.dir_create_checked(exp_input_root, @@ -402,7 +402,7 @@ def populations(self, if exp_names is not None: names = exp_names else: - names = self.gen_exp_names(cmdopts) + names = self.gen_exp_names() module = pm.pipeline.get_plugin_module(cmdopts['platform']) for d in names: @@ -489,7 +489,7 @@ def gen_tag_rmlist(self) -> tp.List[xml.TagRmList]: ret.extend(self.criteria2.gen_tag_rmlist()) return ret - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: """ Generate a SORTED list of strings for all experiment names. @@ -497,8 +497,8 @@ def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: parents. """ - list1 = self.criteria1.gen_exp_names(cmdopts) - list2 = self.criteria2.gen_exp_names(cmdopts) + list1 = self.criteria1.gen_exp_names() + list2 = self.criteria2.gen_exp_names() ret = [] for l1 in list1: @@ -514,10 +514,9 @@ def populations(self, cmdopts: types.Cmdopts) -> tp.List[tp.List[int]]: `gen_exp_names()` for each criteria along each axis. 
""" - names = self.gen_exp_names(cmdopts) + names = self.gen_exp_names() - sizes = [[0 for col in self.criteria2.gen_exp_names( - cmdopts)] for row in self.criteria1.gen_exp_names(cmdopts)] + sizes = [[0 for col in self.criteria2.gen_exp_names()] for row in self.criteria1.gen_exp_names()] n_chgs2 = len(self.criteria2.gen_attr_changelist()) n_adds2 = len(self.criteria2.gen_tag_addlist()) @@ -556,11 +555,11 @@ def exp_scenario_name(self, exp_num: int) -> str: "Bivariate batch criteria does not contain constant density") def graph_xticks(self, - cmdopts: types.Cmdopts, - exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: + exp_range: str, + batch_output_root: pathlib.Path) -> tp.List[float]: names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + all_dirs = utils.exp_range_calc(exp_range, + batch_output_root, self) for c1 in self.criteria1.gen_exp_names(cmdopts): @@ -573,14 +572,14 @@ def graph_xticks(self, return self.criteria1.graph_xticks(cmdopts, names) def graph_yticks(self, - cmdopts: types.Cmdopts, - exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: + exp_range: str, + batch_output_root: pathlib.Path) -> tp.List[float]: names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], + all_dirs = utils.exp_range_calc(exp_range, + batch_output_root, self) - for c2 in self.criteria2.gen_exp_names(cmdopts): + for c2 in self.criteria2.gen_exp_names(): for y in all_dirs: leaf = y.name if c2 in leaf.split('+')[1]: @@ -590,37 +589,13 @@ def graph_yticks(self, return self.criteria2.graph_xticks(cmdopts, names) def graph_xticklabels(self, - cmdopts: types.Cmdopts, - exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: - names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], - self) - - for c1 in self.criteria1.gen_exp_names(cmdopts): - for x in all_dirs: - leaf = x.name - if c1 in leaf.split('+')[0]: - names.append(leaf) - break - + exp_range: str, + 
batch_output_root: pathlib.Path) -> tp.List[str]: return self.criteria1.graph_xticklabels(cmdopts, names) def graph_yticklabels(self, - cmdopts: types.Cmdopts, - exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: - names = [] - all_dirs = utils.exp_range_calc(cmdopts, - cmdopts['batch_output_root'], - self) - - for c2 in self.criteria2.gen_exp_names(cmdopts): - for y in all_dirs: - leaf = y.name - if c2 in leaf.split('+')[1]: - names.append(leaf) - break - + exp_range: str, + batch_output_root: pathlib.Path) -> tp.List[str]: return self.criteria2.graph_xticklabels(cmdopts, names) def graph_xlabel(self, cmdopts: types.Cmdopts) -> str: @@ -634,22 +609,23 @@ def set_batch_input_root(self, root: pathlib.Path) -> None: self.criteria1.batch_input_root = root self.criteria2.batch_input_root = root - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: n_chgs2 = len(self.criteria2.gen_attr_changelist()) n_adds2 = len(self.criteria2.gen_tag_addlist()) i = int(exp_num / (n_chgs2 + n_adds2)) j = exp_num % (n_chgs2 + n_adds2) - if hasattr(self.criteria1, 'n_robots'): - return self.criteria1.n_robots(i) - elif hasattr(self.criteria2, 'n_robots'): - return self.criteria2.n_robots(j) + if hasattr(self.criteria1, 'n_agents'): + return self.criteria1.n_agents(i) + elif hasattr(self.criteria2, 'n_agents'): + return self.criteria2.n_agents(j) raise NotImplementedError def factory(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, args: argparse.Namespace, scenario: tp.Optional[str] = None) -> IConcreteBatchCriteria: if scenario is None: @@ -658,6 +634,7 @@ def factory(main_config: types.YAMLDict, if len(args.batch_criteria) == 1: return __univar_factory(main_config, cmdopts, + batch_input_root, args.batch_criteria[0], scenario) elif len(args.batch_criteria) == 2: @@ -665,6 +642,7 @@ def factory(main_config: types.YAMLDict, "Duplicate batch criteria passed" return __bivar_factory(main_config, cmdopts, + 
batch_input_root, args.batch_criteria, scenario) else: @@ -674,6 +652,7 @@ def factory(main_config: types.YAMLDict, def __univar_factory(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, cli_arg: str, scenario) -> IConcreteBatchCriteria: """ @@ -689,9 +668,10 @@ def __univar_factory(main_config: types.YAMLDict, ret = bcfactory(cli_arg, main_config, cmdopts, + batch_input_root, scenario=scenario)() else: - ret = bcfactory(cli_arg, main_config, cmdopts)() + ret = bcfactory(cli_arg, main_config, cmdopts, batch_input_root)() logging.info("Create univariate batch criteria '%s' from '%s'", ret.__class__.__name__, @@ -701,10 +681,20 @@ def __univar_factory(main_config: types.YAMLDict, def __bivar_factory(main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, cli_arg: tp.List[str], scenario: str) -> IConcreteBatchCriteria: - criteria1 = __univar_factory(main_config, cmdopts, cli_arg[0], scenario) - criteria2 = __univar_factory(main_config, cmdopts, cli_arg[1], scenario) + criteria1 = __univar_factory(main_config, + cmdopts, + batch_input_root, + cli_arg[0], + scenario) + + criteria2 = __univar_factory(main_config, + cmdopts, + batch_input_root, + cli_arg[1], + scenario) # Project hook bc = pm.module_load_tiered(project=cmdopts['project'], diff --git a/sierra/core/variables/population_size.py b/sierra/core/variables/population_size.py index ef1f360a..de108d23 100644 --- a/sierra/core/variables/population_size.py +++ b/sierra/core/variables/population_size.py @@ -29,7 +29,7 @@ def graph_xticks(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = self.gen_exp_names() ret = list(map(float, self.populations(cmdopts, exp_names))) @@ -45,7 +45,7 @@ def graph_xticklabels(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = 
self.gen_exp_names() ret = map(float, self.populations(cmdopts, exp_names)) @@ -74,7 +74,7 @@ def __call__(self, arg: str) -> types.CLIArgSpec: # remove batch criteria variable name, leaving only the spec sections = sections[1:] - assert len(sections) >= 1 and len(sections) <= 2,\ + assert len(sections) >= 1 and len(sections) <= 2, \ ("Spec must have 1 or 2 sections separated by '.'; " f"have {len(sections)} from '{arg}'") diff --git a/sierra/plugins/platform/argos/cmdline.py b/sierra/plugins/platform/argos/cmdline.py index dbf99efd..80085daf 100755 --- a/sierra/plugins/platform/argos/cmdline.py +++ b/sierra/plugins/platform/argos/cmdline.py @@ -290,7 +290,7 @@ def cmdopts_update(cli_args, cmdopts: types.Cmdopts) -> None: updates = { # Stage 1 - 'n_robots': cli_args.n_robots, + 'n_agents': cli_args.n_agents, 'exp_setup': cli_args.exp_setup, diff --git a/sierra/plugins/platform/argos/generators/platform_generators.py b/sierra/plugins/platform/argos/generators/platform_generators.py index 71daa2b0..8ebd06e6 100644 --- a/sierra/plugins/platform/argos/generators/platform_generators.py +++ b/sierra/plugins/platform/argos/generators/platform_generators.py @@ -69,7 +69,7 @@ def generate(self) -> definition.XMLExpDef: write_config=wr_config) # Generate # robots - self._generate_n_robots(exp_def) + self._generate_n_agents(exp_def) # Setup library self._generate_library(exp_def) @@ -118,18 +118,18 @@ def generate_physics(self, n_engines, engine_type) if cmdopts['physics_spatial_hash2D']: - assert hasattr(self.spec.criteria, 'n_robots'),\ + assert hasattr(self.spec.criteria, 'n_agents'), \ ("When using the 2D spatial hash, the batch " "criteria must implement bc.IQueryableBatchCriteria") - n_robots = self.spec.criteria.n_robots(self.spec.exp_num) + n_agents = self.spec.criteria.n_agents(self.spec.exp_num) else: - n_robots = None + n_agents = None module = pm.pipeline.get_plugin_module(cmdopts['platform']) robot_type = module.robot_type_from_def(exp_def) pe = 
physics_engines.factory(engine_type, n_engines, - n_robots, + n_agents, robot_type, cmdopts, extents) @@ -149,19 +149,19 @@ def generate_arena_shape(self, _, adds, chgs = utils.apply_to_expdef(shape, exp_def) utils.pickle_modifications(adds, chgs, self.spec.exp_def_fpath) - def _generate_n_robots(self, exp_def: definition.XMLExpDef) -> None: + def _generate_n_agents(self, exp_def: definition.XMLExpDef) -> None: """ Generate XML changes to setup # robots (if specified on cmdline). Writes generated changes to the simulation definition pickle file. """ - if self.cmdopts['n_robots'] is None: + if self.cmdopts['n_agents'] is None: return self.logger.trace(("Generating changes for # robots " # type: ignore "(all runs)")) chgs = population_size.PopulationSize.gen_attr_changelist_from_list( - [self.cmdopts['n_robots']]) + [self.cmdopts['n_agents']]) for a in chgs[0]: exp_def.attr_change(a.path, a.attr, a.value, True) diff --git a/sierra/plugins/platform/argos/plugin.py b/sierra/plugins/platform/argos/plugin.py index e3f28f9d..099f86c4 100644 --- a/sierra/plugins/platform/argos/plugin.py +++ b/sierra/plugins/platform/argos/plugin.py @@ -20,7 +20,7 @@ # Project packages from sierra.plugins.platform.argos import cmdline -from sierra.core import hpc, config, types, utils, platform +from sierra.core import hpc, config, types, utils, platform, batchroot from sierra.core.experiment import bindings, definition, xml import sierra.core.variables.batch_criteria as bc @@ -102,7 +102,7 @@ def _hpc_slurm(self, args: argparse.Namespace) -> None: def _hpc_local(self, args: argparse.Namespace) -> None: self.logger.debug("Configuring ARGoS for LOCAL execution") if any(stage in args.pipeline for stage in [1, 2]): - assert args.physics_n_engines is not None,\ + assert args.physics_n_engines is not None, \ '--physics-n-engines is required for --exec-env=hpc.local when running stage{1,2}' ppn_per_run_req = args.physics_n_engines @@ -153,7 +153,7 @@ class ExpRunShellCmdsGenerator(): def 
__init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts self.display_port = -1 @@ -276,7 +276,7 @@ def __call__(self) -> None: version = packaging.version.parse(res.group(0)) min_version = config.kARGoS['min_version'] - assert version >= min_version,\ + assert version >= min_version, \ f"ARGoS version {version} < min required {min_version}" if self.cmdopts['platform_vc']: @@ -302,7 +302,7 @@ def arena_dims_from_criteria(criteria: bc.BatchCriteria) -> tp.List[utils.ArenaE d = utils.Vector3D.from_str(c.value) dims.append(utils.ArenaExtent(d)) - assert len(dims) > 0,\ + assert len(dims) > 0, \ "Scenario dimensions not contained in batch criteria" return dims @@ -328,10 +328,11 @@ def population_size_from_def(exp_def: definition.XMLExpDef, def pre_exp_diagnostics(cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, logger: logging.Logger) -> None: s = "batch_exp_root='%s',runs/exp=%s,threads/job=%s,n_jobs=%s" logger.info(s, - cmdopts['batch_root'], + pathset.root, cmdopts['n_runs'], cmdopts['physics_n_threads'], cmdopts['exec_jobs_per_node']) diff --git a/sierra/plugins/platform/argos/variables/physics_engines.py b/sierra/plugins/platform/argos/variables/physics_engines.py index c3f43912..01d0a000 100755 --- a/sierra/plugins/platform/argos/variables/physics_engines.py +++ b/sierra/plugins/platform/argos/variables/physics_engines.py @@ -62,7 +62,7 @@ def __init__(self, # If we are given multiple extents to map, we need to divide the # specified # of engines among them. 
self.n_engines = int(self.n_engines / float(len(self.extents))) - assert self.layout == 'uniform_grid2D',\ + assert self.layout == 'uniform_grid2D', \ "Only uniform_grid2D physics engine layout currently supported" self.logger = logging.getLogger(__name__) @@ -473,7 +473,7 @@ def __init__(self, def factory(engine_type: str, n_engines: int, - n_robots: tp.Optional[int], + n_agents: tp.Optional[int], robot_type: str, cmdopts: types.Cmdopts, extents: tp.List[ArenaExtent]) -> PhysicsEngines: @@ -484,11 +484,11 @@ def factory(engine_type: str, # remain so in the future, so we employ a factory function to make # implementation of diverging functionality easier later. if '2d' in engine_type: - if n_robots and cmdopts['physics_spatial_hash2D']: + if n_agents and cmdopts['physics_spatial_hash2D']: spatial_hash = { # Per ARGoS documentation in 'argos3 -q dynamics2d' 'cell_size': config.kARGoS['spatial_hash2D'][robot_type], - 'cell_num': n_robots / float(n_engines) * 10 + 'cell_num': n_agents / float(n_engines) * 10 } logging.debug(("Using 2D spatial hash for physics engines: " "cell_size=%f,cell_num=%d"), diff --git a/sierra/plugins/platform/argos/variables/population_constant_density.py b/sierra/plugins/platform/argos/variables/population_constant_density.py index 9a06300a..9928965a 100755 --- a/sierra/plugins/platform/argos/variables/population_constant_density.py +++ b/sierra/plugins/platform/argos/variables/population_constant_density.py @@ -13,6 +13,7 @@ import typing as tp import logging import math +import pathlib # 3rd party packages import implements @@ -58,11 +59,11 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: Vector3D(x, y, z)) # ARGoS won't start if there are 0 robots, so you always # need to put at least 1. 
- n_robots = int(extent.area() * + n_agents = int(extent.area() * (self.target_density / 100.0)) - if n_robots == 0: - n_robots = 1 - self.logger.warning(("n_robots set to 1 even though " + if n_agents == 0: + n_agents = 1 + self.logger.warning(("n_agents set to 1 even though " "calculated as 0 for area=%s," "density=%s"), str(extent.area()), @@ -70,9 +71,9 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: changeset.add(xml.AttrChange(".//arena/distribute/entity", "quantity", - str(n_robots))) + str(n_agents))) self.logger.debug("Calculated population size=%d for extent=%s,density=%s", - n_robots, + n_agents, str(extent), self.target_density) break @@ -80,7 +81,7 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: return self.attr_changes - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: changes = self.gen_attr_changelist() return ['exp' + str(x) for x in range(0, len(changes))] @@ -89,7 +90,7 @@ def graph_xticks(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = self.gen_exp_names() ret = list(map(float, self.populations(cmdopts, exp_names))) @@ -104,7 +105,7 @@ def graph_xticklabels(self, cmdopts: types.Cmdopts, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[str]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = self.gen_exp_names() ret = map(float, self.populations(cmdopts, exp_names)) @@ -116,7 +117,7 @@ def graph_xlabel(self, cmdopts: types.Cmdopts) -> str: return r"Population Size" - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return int(self.target_density / 100.0 * self.dimensions[exp_num].area()) @@ -148,6 +149,7 @@ def calc_dims(cmdopts: types.Cmdopts, def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> 
PopulationConstantDensity: """Create a :class:`PopulationConstantDensity` derived class. @@ -160,7 +162,7 @@ def __init__(self) -> None: PopulationConstantDensity.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, attr["target_density"], dims, kw['scenario_tag']) diff --git a/sierra/plugins/platform/argos/variables/population_size.py b/sierra/plugins/platform/argos/variables/population_size.py index cc394daf..8df2feb0 100755 --- a/sierra/plugins/platform/argos/variables/population_size.py +++ b/sierra/plugins/platform/argos/variables/population_size.py @@ -67,17 +67,18 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: self.size_list) return self.attr_changes - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: changes = self.gen_attr_changelist() return ['exp' + str(x) for x in range(0, len(changes))] - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return self.size_list[exp_num] def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> PopulationSize: """Create a :class:`PopulationSize` derived class from the cmdline definition. 
@@ -89,7 +90,7 @@ def __init__(self) -> None: PopulationSize.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, max_sizes) return type(cli_arg, # type: ignore diff --git a/sierra/plugins/platform/argos/variables/population_variable_density.py b/sierra/plugins/platform/argos/variables/population_variable_density.py index 80435537..4e68fd30 100755 --- a/sierra/plugins/platform/argos/variables/population_variable_density.py +++ b/sierra/plugins/platform/argos/variables/population_variable_density.py @@ -13,6 +13,7 @@ import typing as tp import logging import numpy as np +import pathlib # 3rd party packages import implements @@ -50,19 +51,19 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: for density in self.densities: # ARGoS won't start if there are 0 robots, so you always # need to put at least 1. - n_robots = int(self.extent.area() * (density / 100.0)) - if n_robots == 0: - n_robots = 1 - self.logger.warning("n_robots set to 1 even though \ + n_agents = int(self.extent.area() * (density / 100.0)) + if n_agents == 0: + n_agents = 1 + self.logger.warning("n_agents set to 1 even though \ calculated as 0 for area=%d,density=%s", self.extent.area(), density) changeset = xml.AttrChangeSet(xml.AttrChange(".//arena/distribute/entity", "quantity", - str(n_robots))) + str(n_agents))) self.attr_changes.append(changeset) self.logger.debug("Calculated swarm size=%d for extent=%s,density=%s", - n_robots, + n_agents, str(self.extent), density) @@ -70,7 +71,7 @@ def gen_attr_changelist(self) -> tp.List[xml.AttrChangeSet]: return self.attr_changes - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: changes = self.gen_attr_changelist() return ['exp' + str(x) for x in range(0, len(changes))] @@ -79,7 +80,7 @@ def graph_xticks(self, exp_names: tp.Optional[tp.List[str]] = None) -> tp.List[float]: if exp_names is None: - exp_names = self.gen_exp_names(cmdopts) + exp_names = 
self.gen_exp_names() return [p / self.extent.area() for p in self.populations(cmdopts, exp_names)] @@ -92,13 +93,14 @@ def graph_xticklabels(self, def graph_xlabel(self, cmdopts: types.Cmdopts) -> str: return r"Population Density" - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return int(self.extent.area() * self.densities[exp_num] / 100.0) def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> PopulationVariableDensity: """ Create a :class:`PopulationVariableDensity` derived class. @@ -118,7 +120,7 @@ def __init__(self) -> None: PopulationVariableDensity.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, densities, extent) diff --git a/sierra/plugins/platform/ros1gazebo/plugin.py b/sierra/plugins/platform/ros1gazebo/plugin.py index e82502cd..a6a632c3 100644 --- a/sierra/plugins/platform/ros1gazebo/plugin.py +++ b/sierra/plugins/platform/ros1gazebo/plugin.py @@ -20,7 +20,7 @@ # Project packages from sierra.plugins.platform.ros1gazebo import cmdline -from sierra.core import hpc, platform, config, ros1, types +from sierra.core import hpc, platform, config, ros1, types, batchroot from sierra.core.experiment import bindings, definition, xml import sierra.core.variables.batch_criteria as bc @@ -119,7 +119,7 @@ class ExpRunShellCmdsGenerator(): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts self.gazebo_port = -1 @@ -278,15 +278,15 @@ def __call__(self) -> None: keys = ['ROS_DISTRO', 'ROS_VERSION'] for k in keys: - assert k in os.environ,\ + assert k in os.environ, \ f"Non-ROS+Gazebo environment detected: '{k}' not found" # Check ROS distro - assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'],\ + assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'], \ "SIERRA only supports ROS1 kinetic,noetic" # Check ROS version - 
assert os.environ['ROS_VERSION'] == "1",\ + assert os.environ['ROS_VERSION'] == "1", \ "Wrong ROS version: this plugin is for ROS1" # Check we can find Gazebo @@ -299,7 +299,7 @@ def __call__(self) -> None: version = packaging.version.parse(res.group(0)) min_version = packaging.version.parse(config.kGazebo['min_version']) - assert version >= min_version,\ + assert version >= min_version, \ f"Gazebo version {version} < min required {min_version}" @@ -326,10 +326,11 @@ def robot_prefix_extract(main_config: types.YAMLDict, def pre_exp_diagnostics(cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, logger: logging.Logger) -> None: s = "batch_exp_root='%s',runs/exp=%s,threads/job=%s,n_jobs=%s" logger.info(s, - cmdopts['batch_root'], + pathset.root, cmdopts['n_runs'], cmdopts['physics_n_threads'], cmdopts['exec_jobs_per_node']) diff --git a/sierra/plugins/platform/ros1gazebo/variables/population_size.py b/sierra/plugins/platform/ros1gazebo/variables/population_size.py index b3405bc6..0da37b89 100755 --- a/sierra/plugins/platform/ros1gazebo/variables/population_size.py +++ b/sierra/plugins/platform/ros1gazebo/variables/population_size.py @@ -153,7 +153,7 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: return self.tag_adds - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: adds = self.gen_tag_addlist() return ['exp' + str(x) for x in range(0, len(adds))] @@ -164,7 +164,8 @@ def n_robots(self, exp_num: int) -> int: def factory(cli_arg: str, main_config: types.YAMLDict, cmdopts: types.Cmdopts, - **kwargs) -> PopulationSize: + batch_input_root: pathlib.Path, + **kwargs) -> PopulationSize: """Create a :class:`PopulationSize` derived class from the cmdline definition.
""" @@ -189,7 +190,7 @@ def __init__(self) -> None: PopulationSize.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, cmdopts['robot'], max_sizes, positions) diff --git a/sierra/plugins/platform/ros1robot/generators/platform_generators.py b/sierra/plugins/platform/ros1robot/generators/platform_generators.py index de0f5e26..3d633108 100644 --- a/sierra/plugins/platform/ros1robot/generators/platform_generators.py +++ b/sierra/plugins/platform/ros1robot/generators/platform_generators.py @@ -81,12 +81,12 @@ def generate(self, exp_def: definition.XMLExpDef): with utils.utf8open(main_path) as f: main_config = yaml.load(f, yaml.FullLoader) - n_robots = utils.get_n_robots(main_config, + n_agents = utils.get_n_agents(main_config, self.cmdopts, self.launch_stem_path.parent, exp_def) - for i in range(0, n_robots): + for i in range(0, n_agents): prefix = main_config['ros']['robots'][self.cmdopts['robot']]['prefix'] exp_def.write_config.add({ 'src_parent': "./robot", diff --git a/sierra/plugins/platform/ros1robot/plugin.py b/sierra/plugins/platform/ros1robot/plugin.py index 4254f2c4..2005c545 100644 --- a/sierra/plugins/platform/ros1robot/plugin.py +++ b/sierra/plugins/platform/ros1robot/plugin.py @@ -17,7 +17,7 @@ # Project packages from sierra.plugins.platform.ros1robot import cmdline -from sierra.core import platform, config, ros1, types, utils +from sierra.core import platform, config, ros1, types, utils, batchroot from sierra.core.experiment import bindings, definition, xml import sierra.core.variables.batch_criteria as bc @@ -37,7 +37,7 @@ def __init__(self, exec_env: str) -> None: def __call__(self, args: argparse.Namespace) -> None: if args.nodefile is None: - assert 'SIERRA_NODEFILE' in os.environ,\ + assert 'SIERRA_NODEFILE' in os.environ, \ ("Non-ros1robot environment detected: --nodefile not " "passed and 'SIERRA_NODEFILE' not found") args.nodefile = os.environ['SIERRA_NODEFILE'] @@ -46,7 +46,7 @@ def __call__(self, args: 
argparse.Namespace) -> None: f"SIERRA_NODEFILE '{args.nodefile}' does not exist" self.logger.info("Using '%s' as robot hostnames file", args.nodefile) - assert not args.platform_vc,\ + assert not args.platform_vc, \ "Platform visual capture not supported on ros1robot" @@ -55,10 +55,10 @@ class ExpRunShellCmdsGenerator(): def __init__(self, cmdopts: types.Cmdopts, criteria: bc.BatchCriteria, - n_robots: int, + n_agents: int, exp_num: int) -> None: self.cmdopts = cmdopts - self.n_robots = n_robots + self.n_agents = n_agents self.exp_num = exp_num self.criteria = criteria self.logger = logging.getLogger('platform.ros1robot') @@ -90,7 +90,7 @@ def pre_run_cmds(self, self.logger.debug("Generating pre-exec cmds for run%s slaves: %d robots", run_num, - self.n_robots) + self.n_agents) script_yaml = main_config['ros']['robots'][self.cmdopts['robot']] script_file = script_yaml.get('setup_script', "$HOME/.bashrc") @@ -123,7 +123,7 @@ def _exec_run_cmds_master(self, run_num) # ROS master node - exp_dirname = self.criteria.gen_exp_names(self.cmdopts)[self.exp_num] + exp_dirname = self.criteria.gen_exp_names()[self.exp_num] exp_template_path = utils.exp_template_path(self.cmdopts, self.criteria.batch_input_root, exp_dirname) @@ -152,20 +152,20 @@ def _exec_run_cmds_slave(self, self.logger.debug("Generating exec cmds for run%s slaves: %d robots", run_num, - self.n_robots) + self.n_agents) nodes = platform.ExecEnvChecker.parse_nodefile(self.cmdopts['nodefile']) - if len(nodes) < self.n_robots: + if len(nodes) < self.n_agents: self.logger.critical(("Need %d hosts to correctly generate launch " "cmds for run%s with %d robots; %d available"), - self.n_robots, + self.n_agents, run_num, - self.n_robots, + self.n_agents, len(nodes)) ret = [] # type: tp.List[types.ShellCmdSpec] - for i in range(0, self.n_robots): + for i in range(0, self.n_agents): # --wait tells roslaunch to wait for the configured master to # come up before launch the robot code. 
cmd = '{0} --wait {1}_robot{2}{3} ' @@ -207,7 +207,7 @@ def pre_exp_cmds(self) -> tp.List[types.ShellCmdSpec]: self.logger.info("Using ROS_MASTER_URI=%s", master_uri) - return[ + return [ types.ShellCmdSpec( # roscore will run on the SIERRA host machine. cmd=f'export ROS_MASTER_URI={master_uri}', @@ -335,11 +335,11 @@ def __call__(self) -> None: k) # Check ROS distro - assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'],\ + assert os.environ['ROS_DISTRO'] in ['kinetic', 'noetic'], \ "SIERRA only supports ROS1 kinetic,noetic" # Check ROS version - assert os.environ['ROS_VERSION'] == "1",\ + assert os.environ['ROS_VERSION'] == "1", \ "Wrong ROS version: This plugin is for ROS1" @@ -366,8 +366,9 @@ def robot_prefix_extract(main_config: types.YAMLDict, def pre_exp_diagnostics(cmdopts: types.Cmdopts, + pathset: batchroot.PathSet, logger: logging.Logger) -> None: s = "batch_exp_root='%s',runs/exp=%s" logger.info(s, - cmdopts['batch_root'], + pathset.root, cmdopts['n_runs']) diff --git a/sierra/plugins/platform/ros1robot/variables/population_size.py b/sierra/plugins/platform/ros1robot/variables/population_size.py index 8ab3ebf2..7280438a 100755 --- a/sierra/plugins/platform/ros1robot/variables/population_size.py +++ b/sierra/plugins/platform/ros1robot/variables/population_size.py @@ -79,7 +79,7 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: per_robot.append(xml.TagAdd("./master/group/[@ns='sierra']", "param", { - 'name': 'experiment/n_robots', + 'name': 'experiment/n_agents', 'value': str(s) }, False)) @@ -109,17 +109,18 @@ def gen_tag_addlist(self) -> tp.List[xml.TagAddList]: return self.tag_adds - def gen_exp_names(self, cmdopts: types.Cmdopts) -> tp.List[str]: + def gen_exp_names(self) -> tp.List[str]: adds = self.gen_tag_addlist() return ['exp' + str(x) for x in range(0, len(adds))] - def n_robots(self, exp_num: int) -> int: + def n_agents(self, exp_num: int) -> int: return self.sizes[exp_num] def factory(cli_arg: str, main_config: types.YAMLDict, 
cmdopts: types.Cmdopts, + batch_input_root: pathlib.Path, **kwargs) -> PopulationSize: """Create a :class:`PopulationSize` derived class from the cmdline definition. @@ -132,7 +133,7 @@ def __init__(self) -> None: PopulationSize.__init__(self, cli_arg, main_config, - cmdopts['batch_input_root'], + batch_input_root, cmdopts['robot'], max_sizes)