From 68ed0f782ca89750119eeba9931ef7ac539aaecc Mon Sep 17 00:00:00 2001
From: Silvan Sievers
Date: Mon, 17 Jul 2023 18:16:56 +0200
Subject: [PATCH] exps

---
 experiments/issue1092/archive.py       | 107 +++++++
 experiments/issue1092/common_setup.py  | 400 +++++++++++++++++++++++++
 experiments/issue1092/ms-parser.py     | 102 +++++++
 experiments/issue1092/requirements.in  |   1 +
 experiments/issue1092/requirements.txt |  34 +++
 experiments/issue1092/v1.py            | 117 ++++++++
 6 files changed, 761 insertions(+)
 create mode 100644 experiments/issue1092/archive.py
 create mode 100644 experiments/issue1092/common_setup.py
 create mode 100755 experiments/issue1092/ms-parser.py
 create mode 100644 experiments/issue1092/requirements.in
 create mode 100644 experiments/issue1092/requirements.txt
 create mode 100755 experiments/issue1092/v1.py

diff --git a/experiments/issue1092/archive.py b/experiments/issue1092/archive.py
new file mode 100644
index 0000000000..6786a89930
--- /dev/null
+++ b/experiments/issue1092/archive.py
@@ -0,0 +1,107 @@
+from pathlib import Path
+import subprocess
+import tarfile
+from tempfile import TemporaryDirectory
+
+ARCHIVE_HOST = "aifiles"
+ARCHIVE_LOCATION = Path("experiments")
+
+def add_archive_step(exp, path):
+    """
+    Adds a step to the given experiment that will archive it to the
+    archive location specified in ARCHIVE_LOCATION under the given path.
+    We archive the following files:
+    - everything in the same directory as the main experiment script
+      (except for 'data', '.venv', and '__pycache__')
+    - all generated reports
+    - the combined properties file
+    - all run and error logs
+    - the source code stored in the experiment data directory
+    - any files added as resources to the experiment
+
+    The first two items in the above list will be stored unpacked for easier
+    access while all other data will be packed.
+    """
+    def archive():
+        archive_path = ARCHIVE_LOCATION / path
+        _archive_script_dir(exp, ARCHIVE_HOST, archive_path)
+        _archive_eval_dir(exp, ARCHIVE_HOST, archive_path)
+        _archive_data_dir(exp, ARCHIVE_HOST, archive_path)
+
+    exp.add_step("archive", archive)
+
+
+def _archive_script_dir(exp, host, archive_path):
+    """
+    Archives everything except 'data', '.venv', and '__pycache__' from the
+    same directory as the experiment script at host:archive_path/scripts.
+    """
+    script_dir = Path(exp._script).parent
+    target_path = archive_path / "scripts"
+
+    script_files = [f for f in script_dir.glob("*")
+                    if f.name not in ["data", ".venv", "venv", "__pycache__"]]
+    _rsync(script_files, host, target_path)
+
+
+def _archive_data_dir(exp, host, archive_path):
+    """
+    Packs all files we want to archive from the experiment's data directory and
+    then archives the packed data at host:archive_path/data. Specifically, the
+    archived files are:
+    - all files directly in the data dir (added resources such as parsers)
+    - all directories starting with "code-" (source code of all revisions and
+      the compiled binaries)
+    - All *.log and *.err files from the run directories
+    """
+    data_dir = Path(exp.path)
+    target_path = archive_path / "data"
+
+    data_files = [f for f in data_dir.glob("*") if f.is_file()]
+    data_files.extend([d for d in data_dir.glob("code-*") if d.is_dir()])
+    data_files.extend(data_dir.glob("runs*/*/*.log"))
+    data_files.extend(data_dir.glob("runs*/*/*.err"))
+    with TemporaryDirectory() as tmpdirname:
+        packed_filename = Path(tmpdirname) / (exp.name + ".tar.xz")
+        _pack(data_files, packed_filename, Path(exp.path).parent)
+        _rsync([packed_filename], host, target_path)
+
+
+def _archive_eval_dir(exp, host, archive_path):
+    """
+    Archives all files in the experiment's eval dir.
+    If there is a properties file, it will be packed and only the
+    packed version will be included in the resulting list.
+    """
+    eval_dir = Path(exp.eval_dir)
+    target_path = archive_path / "data" / eval_dir.name
+
+    filenames = list(eval_dir.glob("*"))
+    properties = eval_dir / "properties"
+    if properties.exists():
+        filenames.remove(properties)
+        with TemporaryDirectory() as tmpdirname:
+            packed_properties = Path(tmpdirname) / "properties.tar.xz"
+            _pack([properties], packed_properties, eval_dir)
+            _rsync([packed_properties], host, target_path)
+    _rsync(filenames, host, target_path)
+
+
+def _pack(filenames, archive_filename, path_prefix):
+    """
+    Packs all files given in filenames into an archive (.tar.xz) located at
+    archive_filename. The path_prefix is removed in the archive, i.e.,
+    if the filename is '/path/to/file' and the prefix is '/path', the location
+    inside the archive will be 'to/file'.
+    """
+    with tarfile.open(archive_filename, "w|xz") as f:
+        for name in filenames:
+            f.add(name, name.relative_to(path_prefix))
+
+def _rsync(filenames, host, target_path):
+    # Before copying files we have to create the target path on host.
+    # We could use the rsync option --mkpath but it is only available in newer
+    # rsync versions (and not in the one running on the grid)
+    # https://stackoverflow.com/questions/1636889
+    subprocess.run(["ssh", host, "mkdir", "-p", target_path])
+    subprocess.run(["rsync", "-avz"] + [str(f) for f in filenames] + [f"{host}:{target_path}"])
diff --git a/experiments/issue1092/common_setup.py b/experiments/issue1092/common_setup.py
new file mode 100644
index 0000000000..3a00971e2e
--- /dev/null
+++ b/experiments/issue1092/common_setup.py
@@ -0,0 +1,400 @@
+# -*- coding: utf-8 -*-
+
+import itertools
+import os
+import platform
+import subprocess
+import sys
+
+from lab.experiment import ARGPARSER
+from lab import tools
+
+from downward.experiment import FastDownwardExperiment
+from downward.reports.absolute import AbsoluteReport
+from downward.reports.compare import ComparativeReport
+from downward.reports.scatter import ScatterPlotReport
+
+import archive
+
+def parse_args():
+    ARGPARSER.add_argument(
+        "--test",
+        choices=["yes", "no", "auto"],
+        default="auto",
+        dest="test_run",
+        help="test experiment locally on a small suite if --test=yes or "
+             "--test=auto and we are not on a cluster")
+    return ARGPARSER.parse_args()
+
+ARGS = parse_args()
+
+
+DEFAULT_OPTIMAL_SUITE = [
+    'agricola-opt18-strips', 'airport', 'barman-opt11-strips',
+    'barman-opt14-strips', 'blocks', 'childsnack-opt14-strips',
+    'data-network-opt18-strips', 'depot', 'driverlog',
+    'elevators-opt08-strips', 'elevators-opt11-strips',
+    'floortile-opt11-strips', 'floortile-opt14-strips', 'freecell',
+    'ged-opt14-strips', 'grid', 'gripper', 'hiking-opt14-strips',
+    'logistics00', 'logistics98', 'miconic', 'movie', 'mprime',
+    'mystery', 'nomystery-opt11-strips', 'openstacks-opt08-strips',
+    'openstacks-opt11-strips', 'openstacks-opt14-strips',
+    'openstacks-strips', 'organic-synthesis-opt18-strips',
+    'organic-synthesis-split-opt18-strips', 'parcprinter-08-strips',
+    'parcprinter-opt11-strips', 'parking-opt11-strips',
+    'parking-opt14-strips', 'pathways', 'pegsol-08-strips',
+    'pegsol-opt11-strips', 'petri-net-alignment-opt18-strips',
+    'pipesworld-notankage', 'pipesworld-tankage', 'psr-small', 'rovers',
+    'satellite', 'scanalyzer-08-strips', 'scanalyzer-opt11-strips',
+    'snake-opt18-strips', 'sokoban-opt08-strips',
+    'sokoban-opt11-strips', 'spider-opt18-strips', 'storage',
+    'termes-opt18-strips', 'tetris-opt14-strips',
+    'tidybot-opt11-strips', 'tidybot-opt14-strips', 'tpp',
+    'transport-opt08-strips', 'transport-opt11-strips',
+    'transport-opt14-strips', 'trucks-strips', 'visitall-opt11-strips',
+    'visitall-opt14-strips', 'woodworking-opt08-strips',
+    'woodworking-opt11-strips', 'zenotravel']
+
+DEFAULT_SATISFICING_SUITE = [
+    'agricola-sat18-strips', 'airport', 'assembly',
+    'barman-sat11-strips', 'barman-sat14-strips', 'blocks',
+    'caldera-sat18-adl', 'caldera-split-sat18-adl', 'cavediving-14-adl',
+    'childsnack-sat14-strips', 'citycar-sat14-adl',
+    'data-network-sat18-strips', 'depot', 'driverlog',
+    'elevators-sat08-strips', 'elevators-sat11-strips',
+    'flashfill-sat18-adl', 'floortile-sat11-strips',
+    'floortile-sat14-strips', 'freecell', 'ged-sat14-strips', 'grid',
+    'gripper', 'hiking-sat14-strips', 'logistics00', 'logistics98',
+    'maintenance-sat14-adl', 'miconic', 'miconic-fulladl',
+    'miconic-simpleadl', 'movie', 'mprime', 'mystery',
+    'nomystery-sat11-strips', 'nurikabe-sat18-adl', 'openstacks',
+    'openstacks-sat08-adl', 'openstacks-sat08-strips',
+    'openstacks-sat11-strips', 'openstacks-sat14-strips',
+    'openstacks-strips', 'optical-telegraphs',
+    'organic-synthesis-sat18-strips',
+    'organic-synthesis-split-sat18-strips', 'parcprinter-08-strips',
+    'parcprinter-sat11-strips', 'parking-sat11-strips',
+    'parking-sat14-strips', 'pathways',
+    'pegsol-08-strips', 'pegsol-sat11-strips', 'philosophers',
+    'pipesworld-notankage', 'pipesworld-tankage', 'psr-large',
+    'psr-middle', 'psr-small', 'rovers', 'satellite',
+    'scanalyzer-08-strips', 'scanalyzer-sat11-strips', 'schedule',
+    'settlers-sat18-adl', 'snake-sat18-strips', 'sokoban-sat08-strips',
+    'sokoban-sat11-strips', 'spider-sat18-strips', 'storage',
+    'termes-sat18-strips', 'tetris-sat14-strips',
+    'thoughtful-sat14-strips', 'tidybot-sat11-strips', 'tpp',
+    'transport-sat08-strips', 'transport-sat11-strips',
+    'transport-sat14-strips', 'trucks', 'trucks-strips',
+    'visitall-sat11-strips', 'visitall-sat14-strips',
+    'woodworking-sat08-strips', 'woodworking-sat11-strips',
+    'zenotravel']
+
+
+def get_script():
+    """Get file name of main script."""
+    return tools.get_script_path()
+
+
+def get_script_dir():
+    """Get directory of main script.
+
+    Usually a relative directory (depends on how it was called by the user.)"""
+    return os.path.dirname(get_script())
+
+
+def get_experiment_name():
+    """Get name for experiment.
+
+    Derived from the absolute filename of the main script, e.g.
+    "/ham/spam/eggs.py" => "spam-eggs"."""
+    script = os.path.abspath(get_script())
+    script_dir = os.path.basename(os.path.dirname(script))
+    script_base = os.path.splitext(os.path.basename(script))[0]
+    return "%s-%s" % (script_dir, script_base)
+
+
+def get_data_dir():
+    """Get data dir for the experiment.
+
+    This is the subdirectory "data" of the directory containing
+    the main script."""
+    return os.path.join(get_script_dir(), "data", get_experiment_name())
+
+
+def get_repo_base():
+    """Get base directory of the repository, as an absolute path.
+
+    Search upwards in the directory tree from the main script until a
+    directory with a subdirectory named ".git" is found.
+
+    Abort if the repo base cannot be found."""
+    path = os.path.abspath(get_script_dir())
+    while os.path.dirname(path) != path:
+        if os.path.exists(os.path.join(path, ".git")):
+            return path
+        path = os.path.dirname(path)
+    sys.exit("repo base could not be found")
+
+
+def is_repo_base(path):
+    """Check if the given path points to a Git repository."""
+    return os.path.exists(os.path.join(path, ".git"))
+
+
+def is_running_on_cluster():
+    node = platform.node()
+    return node.endswith(".scicore.unibas.ch") or node.endswith(".cluster.bc2.ch")
+
+
+def is_test_run():
+    return ARGS.test_run == "yes" or (
+        ARGS.test_run == "auto" and not is_running_on_cluster())
+
+
+def get_algo_nick(revision, config_nick):
+    return "{revision}-{config_nick}".format(**locals())
+
+
+class IssueConfig(object):
+    """Hold information about a planner configuration.
+
+    See FastDownwardExperiment.add_algorithm() for documentation of the
+    constructor's options.
+
+    """
+    def __init__(self, nick, component_options,
+                 build_options=None, driver_options=None):
+        self.nick = nick
+        self.component_options = component_options
+        self.build_options = build_options
+        self.driver_options = driver_options
+
+
+class IssueExperiment(FastDownwardExperiment):
+    """Subclass of FastDownwardExperiment with some convenience features."""
+
+    DEFAULT_TEST_SUITE = ["depot:p01.pddl", "gripper:prob01.pddl"]
+
+    DEFAULT_TABLE_ATTRIBUTES = [
+        "cost",
+        "coverage",
+        "error",
+        "evaluations",
+        "expansions",
+        "expansions_until_last_jump",
+        "generated",
+        "memory",
+        "planner_memory",
+        "planner_time",
+        "quality",
+        "run_dir",
+        "score_evaluations",
+        "score_expansions",
+        "score_generated",
+        "score_memory",
+        "score_search_time",
+        "score_total_time",
+        "search_time",
+        "total_time",
+    ]
+
+    DEFAULT_SCATTER_PLOT_ATTRIBUTES = [
+        "evaluations",
+        "expansions",
+        "expansions_until_last_jump",
+        "initial_h_value",
+        "memory",
+        "search_time",
+        "total_time",
+    ]
+
+    PORTFOLIO_ATTRIBUTES = [
+        "cost",
+        "coverage",
+        "error",
+        "plan_length",
+        "run_dir",
+    ]
+
+    def __init__(self, repo_base=get_repo_base(), revisions=None, configs=None, path=None, **kwargs):
+        """
+
+        You can either specify both *revisions* and *configs* or none
+        of them. If they are omitted, you will need to call
+        exp.add_algorithm() manually.
+
+        If *revisions* is given, it must be a non-empty list of
+        revision identifiers, which specify which planner versions to
+        use in the experiment. The same versions are used for
+        translator, preprocessor and search. ::
+
+            IssueExperiment(revisions=["issue123", "4b3d581643"], ...)
+
+        If *configs* is given, it must be a non-empty list of
+        IssueConfig objects. ::
+
+            IssueExperiment(..., configs=[
+                IssueConfig("ff", ["--search", "eager_greedy(ff())"]),
+                IssueConfig(
+                    "lama", [],
+                    driver_options=["--alias", "seq-sat-lama-2011"]),
+            ])
+
+        If *path* is specified, it must be the path to where the
+        experiment should be built (e.g.
+        /home/john/experiments/issue123/exp01/). If omitted, the
+        experiment path is derived automatically from the main
+        script's filename. Example::
+
+            script = experiments/issue123/exp01.py -->
+            path = experiments/issue123/data/issue123-exp01/
+
+        """
+
+        if not is_repo_base(repo_base):
+            sys.exit(f"repo base '{repo_base}' could not be found or does not contain a git repository.")
+
+        path = path or get_data_dir()
+
+        FastDownwardExperiment.__init__(self, path=path, **kwargs)
+
+        if (revisions and not configs) or (not revisions and configs):
+            raise ValueError(
+                "please provide either both or none of revisions and configs")
+
+        for rev in revisions:
+            for config in configs:
+                self.add_algorithm(
+                    get_algo_nick(rev, config.nick),
+                    repo_base,
+                    rev,
+                    config.component_options,
+                    build_options=config.build_options,
+                    driver_options=config.driver_options)
+
+        self._revisions = revisions
+        self._configs = configs
+
+    @classmethod
+    def _is_portfolio(cls, config_nick):
+        return "fdss" in config_nick
+
+    @classmethod
+    def get_supported_attributes(cls, config_nick, attributes):
+        if cls._is_portfolio(config_nick):
+            return [attr for attr in attributes
+                    if attr in cls.PORTFOLIO_ATTRIBUTES]
+        return attributes
+
+    def add_absolute_report_step(self, **kwargs):
+        """Add step that makes an absolute report.
+
+        Absolute reports are useful for experiments that don't compare
+        revisions.
+
+        The report is written to the experiment evaluation directory.
+
+        All *kwargs* will be passed to the AbsoluteReport class. If the
+        keyword argument *attributes* is not specified, a default list
+        of attributes is used. ::
+
+            exp.add_absolute_report_step(attributes=["coverage"])
+
+        """
+        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
+        report = AbsoluteReport(**kwargs)
+        outfile = os.path.join(
+            self.eval_dir,
+            get_experiment_name() + "." + report.output_format)
+        self.add_report(report, outfile=outfile)
+
+    def add_comparison_table_step(self, revision_pairs=[], **kwargs):
+        """Add a step that makes pairwise revision comparisons.
+
+        Create comparative reports for all pairs of Fast Downward
+        revisions. Each report pairs up the runs of the same config and
+        lists the two absolute attribute values and their difference
+        for all attributes in kwargs["attributes"].
+
+        All *kwargs* will be passed to the CompareConfigsReport class.
+        If the keyword argument *attributes* is not specified, a
+        default list of attributes is used. ::
+
+            exp.add_comparison_table_step(attributes=["coverage"])
+
+        """
+        kwargs.setdefault("attributes", self.DEFAULT_TABLE_ATTRIBUTES)
+
+        if not revision_pairs:
+            revision_pairs = [(rev1, rev2) for rev1, rev2 in itertools.combinations(self._revisions, 2)]
+        def make_comparison_tables():
+            for rev1, rev2 in revision_pairs:
+                compared_configs = []
+                for config in self._configs:
+                    config_nick = config.nick
+                    compared_configs.append(
+                        ("%s-%s" % (rev1, config_nick),
+                         "%s-%s" % (rev2, config_nick),
+                         "Diff (%s)" % config_nick))
+                report = ComparativeReport(compared_configs, **kwargs)
+                outfile = os.path.join(
+                    self.eval_dir,
+                    "%s-%s-%s-compare.%s" % (
+                        self.name, rev1, rev2, report.output_format))
+                report(self.eval_dir, outfile)
+
+        self.add_step("make-comparison-tables", make_comparison_tables)
+
+    def add_scatter_plot_step(self, revision_pairs=[], relative=False,
+                              attributes=None, additional=[]):
+        """Add step creating (relative) scatter plots for all revision pairs.
+
+        Create a scatter plot for each combination of attribute,
+        configuration and revisions pair. If *attributes* is not
+        specified, a list of common scatter plot attributes is used.
+        For portfolios all attributes except "cost", "coverage" and
+        "plan_length" will be ignored. ::
+
+            exp.add_scatter_plot_step(attributes=["expansions"])
+
+        """
+        if relative:
+            scatter_dir = os.path.join(self.eval_dir, "scatter-relative")
+            step_name = "make-relative-scatter-plots"
+        else:
+            scatter_dir = os.path.join(self.eval_dir, "scatter-absolute")
+            step_name = "make-absolute-scatter-plots"
+        if attributes is None:
+            attributes = self.DEFAULT_SCATTER_PLOT_ATTRIBUTES
+        if not revision_pairs:
+            revision_pairs = [entry for entry in itertools.combinations(self._revisions, 2)]
+
+        def make_scatter_plot(config_nick, rev1, rev2, attribute, config_nick2=None):
+            name = "-".join([self.name, rev1, rev2, attribute, config_nick])
+            if config_nick2 is not None:
+                name += "-" + config_nick2
+            print("Make scatter plot for", name)
+            algo1 = get_algo_nick(rev1, config_nick)
+            algo2 = get_algo_nick(rev2, config_nick if config_nick2 is None else config_nick2)
+            report = ScatterPlotReport(
+                filter_algorithm=[algo1, algo2],
+                attributes=[attribute],
+                relative=relative,
+                get_category=lambda run1, run2: run1["domain"])
+            report(
+                self.eval_dir,
+                os.path.join(scatter_dir, rev1 + "-" + rev2, name))
+
+        def make_scatter_plots():
+            for config in self._configs:
+                for rev1, rev2 in revision_pairs:
+                    for attribute in self.get_supported_attributes(
+                            config.nick, attributes):
+                        make_scatter_plot(config.nick, rev1, rev2, attribute)
+            for nick1, nick2, rev1, rev2, attribute in additional:
+                make_scatter_plot(nick1, rev1, rev2, attribute, config_nick2=nick2)
+
+        self.add_step(step_name, make_scatter_plots)
+
+    def add_archive_step(self, archive_path):
+        archive.add_archive_step(self, archive_path)
diff --git a/experiments/issue1092/ms-parser.py b/experiments/issue1092/ms-parser.py
new file mode 100755
index 0000000000..5d601c8633
--- /dev/null
+++ b/experiments/issue1092/ms-parser.py
@@ -0,0 +1,102 @@
+#! /usr/bin/env python
+
+import math
+import re
+
+from lab.parser import Parser
+
+parser = Parser()
+parser.add_pattern('ms_construction_time', 'Merge-and-shrink algorithm runtime: (.+)s', required=False, type=float)
+parser.add_pattern('ms_main_loop_max_time', 'Main loop max time in seconds: (.+)', required=False, type=float)
+parser.add_pattern('ms_atomic_construction_time', 'M&S algorithm timer: (.+)s \(after computation of atomic factors\)', required=False, type=float)
+parser.add_pattern('ms_memory_delta', 'Final peak memory increase of merge-and-shrink algorithm: (\d+) KB', required=False, type=int)
+parser.add_pattern('ms_num_remaining_factors', 'Number of remaining factors: (\d+)', required=False, type=int)
+parser.add_pattern('ms_num_factors_kept', 'Number of factors kept: (\d+)', required=False, type=int)
+
+def check_ms_constructed(content, props):
+    ms_construction_time = props.get('ms_construction_time')
+    abstraction_constructed = False
+    if ms_construction_time is not None:
+        abstraction_constructed = True
+    props['ms_abstraction_constructed'] = abstraction_constructed
+
+parser.add_function(check_ms_constructed)
+
+def check_atomic_fts_constructed(content, props):
+    ms_atomic_construction_time = props.get('ms_atomic_construction_time')
+    ms_atomic_fts_constructed = False
+    if ms_atomic_construction_time is not None:
+        ms_atomic_fts_constructed = True
+    props['ms_atomic_fts_constructed'] = ms_atomic_fts_constructed
+
+parser.add_function(check_atomic_fts_constructed)
+
+def check_planner_exit_reason(content, props):
+    ms_abstraction_constructed = props.get('ms_abstraction_constructed')
+    error = props.get('error')
+    if error != 'success' and error != 'search-out-of-time' and error != 'search-out-of-memory':
+        print('error: %s' % error)
+        return
+
+    # Check whether merge-and-shrink computation or search ran out of
+    # time or memory.
+    ms_out_of_time = False
+    ms_out_of_memory = False
+    search_out_of_time = False
+    search_out_of_memory = False
+    if ms_abstraction_constructed == False:
+        if error == 'search-out-of-time':
+            ms_out_of_time = True
+        elif error == 'search-out-of-memory':
+            ms_out_of_memory = True
+    elif ms_abstraction_constructed == True:
+        if error == 'search-out-of-time':
+            search_out_of_time = True
+        elif error == 'search-out-of-memory':
+            search_out_of_memory = True
+    props['ms_out_of_time'] = ms_out_of_time
+    props['ms_out_of_memory'] = ms_out_of_memory
+    props['search_out_of_time'] = search_out_of_time
+    props['search_out_of_memory'] = search_out_of_memory
+
+parser.add_function(check_planner_exit_reason)
+
+def check_perfect_heuristic(content, props):
+    plan_length = props.get('plan_length')
+    expansions = props.get('expansions')
+    if plan_length != None:
+        perfect_heuristic = False
+        if plan_length + 1 == expansions:
+            perfect_heuristic = True
+        props['perfect_heuristic'] = perfect_heuristic
+
+parser.add_function(check_perfect_heuristic)
+
+def add_construction_time_score(content, props):
+    """
+    Convert ms_construction_time into scores in the range [0, 1].
+
+    Best possible performance in a task is counted as 1, while failure
+    to construct the heuristic and worst performance are counted as 0.
+
+    """
+    def log_score(value, min_bound, max_bound):
+        if value is None:
+            return 0
+        value = max(value, min_bound)
+        value = min(value, max_bound)
+        raw_score = math.log(value) - math.log(max_bound)
+        best_raw_score = math.log(min_bound) - math.log(max_bound)
+        return raw_score / best_raw_score
+
+    main_loop_max_time = props.get('ms_main_loop_max_time')
+    if main_loop_max_time is not None and main_loop_max_time == float('inf'):
+        max_time = props.get('limit_search_time')
+        if max_time is not None:
+            main_loop_max_time = max_time
+    if main_loop_max_time is not None and main_loop_max_time != float('inf'):
+        props['score_ms_construction_time'] = log_score(props.get('ms_construction_time'), min_bound=1.0, max_bound=main_loop_max_time)
+
+parser.add_function(add_construction_time_score)
+
+parser.parse()
diff --git a/experiments/issue1092/requirements.in b/experiments/issue1092/requirements.in
new file mode 100644
index 0000000000..5339df0c30
--- /dev/null
+++ b/experiments/issue1092/requirements.in
@@ -0,0 +1 @@
+lab==7.3
diff --git a/experiments/issue1092/requirements.txt b/experiments/issue1092/requirements.txt
new file mode 100644
index 0000000000..fd7b6b30b1
--- /dev/null
+++ b/experiments/issue1092/requirements.txt
@@ -0,0 +1,34 @@
+#
+# This file is autogenerated by pip-compile with Python 3.10
+# by the following command:
+#
+#    pip-compile requirements.in
+#
+cycler==0.11.0
+    # via matplotlib
+fonttools==4.35.0
+    # via matplotlib
+kiwisolver==1.4.4
+    # via matplotlib
+lab==7.3
+    # via -r requirements.in
+matplotlib==3.5.3
+    # via lab
+numpy==1.23.2
+    # via matplotlib
+packaging==21.3
+    # via matplotlib
+pillow==9.2.0
+    # via matplotlib
+pyparsing==3.0.9
+    # via
+    #   matplotlib
+    #   packaging
+python-dateutil==2.8.2
+    # via matplotlib
+simplejson==3.17.6
+    # via lab
+six==1.16.0
+    # via python-dateutil
+txt2tags==3.7
+    # via lab
diff --git a/experiments/issue1092/v1.py b/experiments/issue1092/v1.py
new file mode 100755
index 0000000000..3e8dfe26b6
--- /dev/null
+++ b/experiments/issue1092/v1.py
@@ -0,0 +1,117 @@
+#! /usr/bin/env python3
+
+import itertools
+import os
+from pathlib import Path
+
+from lab.environments import LocalEnvironment, BaselSlurmEnvironment
+from lab.reports import Attribute, geometric_mean
+
+from downward.reports.compare import ComparativeReport
+
+import common_setup
+from common_setup import IssueConfig, IssueExperiment
+
+ARCHIVE_PATH = "ai/downward/issue1092"
+DIR = os.path.dirname(os.path.abspath(__file__))
+REPO_DIR = os.environ["DOWNWARD_REPO"]
+BENCHMARKS_DIR = os.environ["DOWNWARD_BENCHMARKS"]
+REVISION = "issue1092-v1"
+REVISIONS = [REVISION]
+BUILDS = ["release"]
+CONFIG_NICKS = [
+    ('sbmiasm-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(use_caching=false,shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
+    ('sbmiasm-b50k-cache', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_stateless(merge_selector=score_based_filtering(scoring_functions=[sf_miasm(use_caching=true,shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
+    ('sccs-sbmiasm-b50k', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[sf_miasm(use_caching=false,shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
+    ('sccs-sbmiasm-b50k-cache', ['--search', 'astar(merge_and_shrink(shrink_strategy=shrink_bisimulation(greedy=false),merge_strategy=merge_sccs(order_of_sccs=topological,merge_selector=score_based_filtering(scoring_functions=[sf_miasm(use_caching=true,shrink_strategy=shrink_bisimulation(greedy=false),max_states=50000,threshold_before_merge=1),total_order(atomic_ts_order=reverse_level,product_ts_order=new_to_old,atomic_before_product=false)])),label_reduction=exact(before_shrinking=true,before_merging=false),max_states=50000,threshold_before_merge=1))']),
+]
+CONFIGS = [
+    IssueConfig(
+        config_nick,
+        config,
+        build_options=[build],
+        driver_options=["--build", build])
+    for build in BUILDS
+    for config_nick, config in CONFIG_NICKS
+]
+
+SUITE = common_setup.DEFAULT_OPTIMAL_SUITE
+ENVIRONMENT = BaselSlurmEnvironment(
+    partition="infai_2",
+    email="silvan.sievers@unibas.ch",
+    # paths obtained via:
+    # module purge
+    # module -q CMake/3.23.1-GCCcore-11.3.0
+    # echo $PATH
+    # echo $LD_LIBRARY_PATH
+    setup='export PATH=/scicore/soft/apps/binutils/2.36.1-GCCcore-10.3.0/bin:/scicore/soft/apps/CMake/3.23.1-GCCcore-10.3.0/bin:/scicore/soft/apps/OpenSSL/1.1/bin:/scicore/soft/apps/libarchive/3.5.1-GCCcore-10.3.0/bin:/scicore/soft/apps/XZ/5.2.5-GCCcore-10.3.0/bin:/scicore/soft/apps/cURL/7.76.0-GCCcore-10.3.0/bin:/scicore/soft/apps/bzip2/1.0.8-GCCcore-10.3.0/bin:/scicore/soft/apps/ncurses/6.2-GCCcore-10.3.0/bin:/scicore/soft/apps/GCCcore/10.3.0/bin:/infai/sieverss/repos/bin:/infai/sieverss/local:/export/soft/lua_lmod/centos7/lmod/lmod/libexec:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:$PATH\nexport LD_LIBRARY_PATH=/scicore/soft/apps/binutils/2.36.1-GCCcore-10.3.0/lib:/scicore/soft/apps/OpenSSL/1.1/lib:/scicore/soft/apps/libarchive/3.5.1-GCCcore-10.3.0/lib:/scicore/soft/apps/XZ/5.2.5-GCCcore-10.3.0/lib:/scicore/soft/apps/cURL/7.76.0-GCCcore-10.3.0/lib:/scicore/soft/apps/bzip2/1.0.8-GCCcore-10.3.0/lib:/scicore/soft/apps/zlib/1.2.11-GCCcore-10.3.0/lib:/scicore/soft/apps/ncurses/6.2-GCCcore-10.3.0/lib:/scicore/soft/apps/GCCcore/10.3.0/lib64')
+
+if common_setup.is_test_run():
+    SUITE = IssueExperiment.DEFAULT_TEST_SUITE
+    ENVIRONMENT = LocalEnvironment(processes=4)
+
+exp = IssueExperiment(
+    REPO_DIR,
+    revisions=REVISIONS,
+    configs=CONFIGS,
+    environment=ENVIRONMENT,
+)
+exp.add_suite(BENCHMARKS_DIR, SUITE)
+
+exp.add_parser(exp.EXITCODE_PARSER)
+exp.add_parser(exp.TRANSLATOR_PARSER)
+exp.add_parser(exp.SINGLE_SEARCH_PARSER)
+exp.add_parser(exp.PLANNER_PARSER)
+exp.add_parser('ms-parser.py')
+
+exp.add_step('build', exp.build)
+exp.add_step('start', exp.start_runs)
+exp.add_fetcher(name='fetch')
+
+# planner outcome attributes
+perfect_heuristic = Attribute('perfect_heuristic', absolute=True, min_wins=False)
+
+# m&s attributes
+ms_construction_time = Attribute('ms_construction_time', absolute=False, min_wins=True, function=geometric_mean)
+score_ms_construction_time = Attribute('score_ms_construction_time', min_wins=False, digits=4)
+ms_atomic_construction_time = Attribute('ms_atomic_construction_time', absolute=False, min_wins=True, functions=[geometric_mean])
+ms_abstraction_constructed = Attribute('ms_abstraction_constructed', absolute=True, min_wins=False)
+ms_atomic_fts_constructed = Attribute('ms_atomic_fts_constructed', absolute=True, min_wins=False)
+ms_out_of_memory = Attribute('ms_out_of_memory', absolute=True, min_wins=True)
+ms_out_of_time = Attribute('ms_out_of_time', absolute=True, min_wins=True)
+search_out_of_memory = Attribute('search_out_of_memory', absolute=True, min_wins=True)
+search_out_of_time = Attribute('search_out_of_time', absolute=True, min_wins=True)
+
+extra_attributes = [
+    perfect_heuristic,
+
+    ms_construction_time,
+    score_ms_construction_time,
+    ms_atomic_construction_time,
+    ms_abstraction_constructed,
+    ms_atomic_fts_constructed,
+    ms_out_of_memory,
+    ms_out_of_time,
+    search_out_of_memory,
+    search_out_of_time,
+]
+attributes = exp.DEFAULT_TABLE_ATTRIBUTES
+attributes.extend(extra_attributes)
+
+report_name=f'{exp.name}-compare'
+report_file=Path(exp.eval_dir) / f'{report_name}.html'
+exp.add_report(
+    ComparativeReport(
+        attributes=attributes,
+        algorithm_pairs=[
+            (f"{REVISION}-sbmiasm-b50k", f"{REVISION}-sbmiasm-b50k-cache"),
+            (f"{REVISION}-sccs-sbmiasm-b50k", f"{REVISION}-sccs-sbmiasm-b50k-cache"),
+        ],
+    ),
+    name=report_name,
+    outfile=report_file,
+)
+
+exp.add_archive_step(ARCHIVE_PATH)
+
+exp.run_steps()