Merge remote-tracking branch 'origin/master' into envvar
caizixian committed Nov 20, 2023
2 parents 2a2154b + 6ba513f commit e1c10ce
Showing 26 changed files with 889 additions and 757 deletions.
3 changes: 3 additions & 0 deletions .vscode/settings.json
@@ -0,0 +1,3 @@
{
"python.pythonPath": "env/bin/python"
}
4 changes: 3 additions & 1 deletion docs/src/changelog.md
@@ -1,6 +1,8 @@
# Changelog
## Unreleased
### Added
#### Benchmark Suites
- `DaCapo`: add 23.11-Chopin release and minheap values.

### Changed

@@ -200,7 +202,7 @@
- Whitespaces can be used in config strings for visual alignment. They are ignored when parsed.

#### Commands
- The `--slice` argument of `runbms` now accepts multiple comma-separated floating point numbers.
- The `--slice` argument of `runbms` now accepts multiple comma-separated floating point numbers.

### Removed
#### Base Configurations
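The changelog entry above says the `--slice` argument of `runbms` accepts multiple comma-separated floating point numbers. As a rough illustration of that format only, the sketch below parses such a value into a list of floats; `parse_slice` is a hypothetical helper, not the actual parsing code inside `runbms`.

```python
# Hypothetical sketch of parsing a comma-separated --slice value such as
# "0.5,0.75,1.0"; runbms's real implementation may differ.
from typing import List


def parse_slice(value: str) -> List[float]:
    # Split on commas and convert each non-empty piece to a float.
    return [float(part.strip()) for part in value.split(",") if part.strip()]


assert parse_slice("0.5,0.75,1.0") == [0.5, 0.75, 1.0]
```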
16 changes: 9 additions & 7 deletions src/running/__main__.py
@@ -15,12 +15,13 @@

def setup_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true",
help="change logging level to DEBUG")
parser.add_argument("--version", action="version",
version="running {}".format(__VERSION__))
parser.add_argument("-d", "--dry-run", action="store_true",
help="dry run")
parser.add_argument(
"-v", "--verbose", action="store_true", help="change logging level to DEBUG"
)
parser.add_argument(
"--version", action="version", version="running {}".format(__VERSION__)
)
parser.add_argument("-d", "--dry-run", action="store_true", help="dry run")
subparsers = parser.add_subparsers()
for m in MODULES:
m.setup_parser(subparsers)
@@ -38,7 +39,8 @@ def main():
log_level = logging.INFO
logging.basicConfig(
format="[%(levelname)s] %(asctime)s %(filename)s:%(lineno)d %(message)s",
level=log_level)
level=log_level,
)

if args.get("dry_run") == True:
set_dry_run(True)
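The `setup_parser` function above delegates to each command module via `m.setup_parser(subparsers)`, and the fillin diff further down shows the matching `set_defaults(which=...)` convention. Below is a minimal standalone sketch of that registration pattern with a made-up `hello` command; it illustrates the structure only and is not part of `running` itself.

```python
# Minimal sketch of the subparser-registration pattern used by running's
# command modules; the "hello" command is made up for illustration.
import argparse


class Hello:
    @staticmethod
    def setup_parser(subparsers):
        p = subparsers.add_parser("hello")
        p.set_defaults(which="hello")
        p.add_argument("NAME")

    @staticmethod
    def run(args):
        print("hello {}".format(args["NAME"]))


MODULES = [Hello]

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
for m in MODULES:
    m.setup_parser(subparsers)

args = vars(parser.parse_args(["hello", "world"]))
if args.get("which") == "hello":
    Hello.run(args)
```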
2 changes: 1 addition & 1 deletion src/running/__version__.py
@@ -1,2 +1,2 @@
VERSION = (0, 4, 2)
__VERSION__ = '.'.join(map(str, VERSION))
__VERSION__ = ".".join(map(str, VERSION))
101 changes: 61 additions & 40 deletions src/running/benchmark.py
@@ -22,11 +22,23 @@ class SubprocessrExit(Enum):
Dryrun = 4


B = TypeVar('B', bound='Benchmark')
B = TypeVar("B", bound="Benchmark")


class Benchmark(object):
def __init__(self, suite_name: str, name: str, wrapper: Optional[str] = None, timeout: Optional[int] = None, override_cwd: Optional[Path] = None, companion: Optional[str] = None, runtime_specific_modifiers_strategy: Optional[Callable[[Runtime], Sequence[Modifier]]] = None, **kwargs):
def __init__(
self,
suite_name: str,
name: str,
wrapper: Optional[str] = None,
timeout: Optional[int] = None,
override_cwd: Optional[Path] = None,
companion: Optional[str] = None,
runtime_specific_modifiers_strategy: Optional[
Callable[[Runtime], Sequence[Modifier]]
] = None,
**kwargs
):
self.name = name
self.suite_name = suite_name
self.env_args: Dict[str, str]
@@ -44,18 +56,20 @@ def __init__(self, suite_name: str, name: str, wrapper: Optional[str] = None, ti
# ignore the current working directory provided by commands like runbms or minheap
# certain benchmarks expect to be invoked from certain directories
self.override_cwd = override_cwd
self.runtime_specific_modifiers_strategy: Callable[[
Runtime], Sequence[Modifier]]
self.runtime_specific_modifiers_strategy: Callable[
[Runtime], Sequence[Modifier]
]
if runtime_specific_modifiers_strategy is not None:
self.runtime_specific_modifiers_strategy = runtime_specific_modifiers_strategy
self.runtime_specific_modifiers_strategy = (
runtime_specific_modifiers_strategy
)
else:
self.runtime_specific_modifiers_strategy = lambda _runtime: []

def get_env_str(self) -> str:
return " ".join([
"{}={}".format(k, smart_quote(v))
for (k, v) in self.env_args.items()
])
return " ".join(
["{}={}".format(k, smart_quote(v)) for (k, v) in self.env_args.items()]
)

def get_full_args(self, _runtime: Runtime) -> List[Union[str, Path]]:
# makes a copy because the subclass might change the list
@@ -83,18 +97,19 @@ def attach_modifiers(self: B, modifiers: Sequence[Modifier]) -> B:
def to_string(self, runtime: Runtime) -> str:
return "{} {}".format(
self.get_env_str(),
" ".join([
smart_quote(os.path.expandvars(x))
for x in self.get_full_args(runtime)
])
" ".join(
[
smart_quote(os.path.expandvars(x))
for x in self.get_full_args(runtime)
]
),
)

def run(self, runtime: Runtime, cwd: Optional[Path] = None) -> Tuple[bytes, bytes, SubprocessrExit]:
def run(
self, runtime: Runtime, cwd: Optional[Path] = None
) -> Tuple[bytes, bytes, SubprocessrExit]:
if suite.is_dry_run():
print(
self.to_string(runtime),
file=sys.stderr
)
print(self.to_string(runtime), file=sys.stderr)
return b"", b"", SubprocessrExit.Dryrun
else:
cmd = self.get_full_args(runtime)
@@ -105,7 +120,8 @@ def run(self, runtime: Runtime, cwd: Optional[Path] = None) -> Tuple[bytes, byte
stdout: Optional[bytes]
if self.companion:
companion_p = subprocess.Popen(
self.companion, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self.companion, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
sleep(COMPANION_WAIT_START)
try:
p = subprocess.run(
@@ -114,7 +130,7 @@ def run(self, runtime: Runtime, cwd: Optional[Path] = None) -> Tuple[bytes, byte
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=self.timeout,
cwd=self.override_cwd if self.override_cwd else cwd
cwd=self.override_cwd if self.override_cwd else cwd,
)
subprocess_exit = SubprocessrExit.Normal
stdout = p.stdout
@@ -127,12 +143,12 @@ def run(self, runtime: Runtime, cwd: Optional[Path] = None) -> Tuple[bytes, byte
finally:
if self.companion:
try:
companion_stdout, _ = companion_p.communicate(
timeout=10)
companion_stdout, _ = companion_p.communicate(timeout=10)
companion_out += companion_stdout
except subprocess.TimeoutExpired:
logging.warning(
"Companion program not exited after 10 seconds timeout. Trying to kill ...")
"Companion program not exited after 10 seconds timeout. Trying to kill ..."
)
try:
companion_p.kill()
except PermissionError:
@@ -153,7 +169,7 @@ def __init__(self, program: Path, program_args: List[Union[str, Path]], **kwargs
def __str__(self) -> str:
return self.to_string(DummyRuntime(""))

def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'BinaryBenchmark':
def attach_modifiers(self, modifiers: Sequence[Modifier]) -> "BinaryBenchmark":
bb = super().attach_modifiers(modifiers)
for m in modifiers:
if not m.should_attach(self.suite_name, self.name):
@@ -163,11 +179,9 @@ def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'BinaryBenchmark':
elif type(m) == JVMArg:
logging.warning("JVMArg not respected by BinaryBenchmark")
elif isinstance(m, JVMClasspathAppend) or type(m) == JVMClasspathPrepend:
logging.warning(
"JVMClasspath not respected by BinaryBenchmark")
logging.warning("JVMClasspath not respected by BinaryBenchmark")
elif type(m) == JSArg:
logging.warning(
"JSArg not respected by BinaryBenchmark")
logging.warning("JSArg not respected by BinaryBenchmark")
return bb

def get_full_args(self, _runtime: Runtime) -> List[Union[str, Path]]:
@@ -178,7 +192,9 @@ def get_full_args(self, _runtime: Runtime) -> List[Union[str, Path]]:


class JavaBenchmark(Benchmark):
def __init__(self, jvm_args: List[str], program_args: List[str], cp: List[str], **kwargs):
def __init__(
self, jvm_args: List[str], program_args: List[str], cp: List[str], **kwargs
):
super().__init__(**kwargs)
self.jvm_args = jvm_args
self.program_args = program_args
@@ -190,7 +206,7 @@ def get_classpath_args(self) -> List[str]:
def __str__(self) -> str:
return self.to_string(DummyRuntime("java"))

def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'JavaBenchmark':
def attach_modifiers(self, modifiers: Sequence[Modifier]) -> "JavaBenchmark":
jb = super().attach_modifiers(modifiers)
for m in modifiers:
if not m.should_attach(self.suite_name, self.name):
@@ -204,8 +220,7 @@ def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'JavaBenchmark':
elif type(m) == JVMClasspathPrepend:
jb.cp = m.val + jb.cp
elif type(m) == JSArg:
logging.warning(
"JSArg not respected by JavaBenchmark")
logging.warning("JSArg not respected by JavaBenchmark")
return jb

def get_full_args(self, runtime: Runtime) -> List[Union[str, Path]]:
@@ -218,7 +233,9 @@ def get_full_args(self, runtime: Runtime) -> List[Union[str, Path]]:


class JavaScriptBenchmark(Benchmark):
def __init__(self, js_args: List[str], program: str, program_args: List[str], **kwargs):
def __init__(
self, js_args: List[str], program: str, program_args: List[str], **kwargs
):
super().__init__(**kwargs)
self.js_args = js_args
self.program = program
@@ -227,7 +244,7 @@ def __init__(self, js_args: List[str], program: str, program_args: List[str], **
def __str__(self) -> str:
return self.to_string(DummyRuntime("js"))

def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'JavaScriptBenchmark':
def attach_modifiers(self, modifiers: Sequence[Modifier]) -> "JavaScriptBenchmark":
jb = super().attach_modifiers(modifiers)
for m in modifiers:
if not m.should_attach(self.suite_name, self.name):
@@ -237,8 +254,7 @@ def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'JavaScriptBenchmar
elif type(m) == JVMArg:
logging.warning("JVMArg not respected by JavaScriptBenchmark")
elif isinstance(m, JVMClasspathAppend) or type(m) == JVMClasspathPrepend:
logging.warning(
"JVMClasspath not respected by JavaScriptBenchmark")
logging.warning("JVMClasspath not respected by JavaScriptBenchmark")
elif type(m) == JSArg:
jb.js_args.extend(m.val)
return jb
@@ -255,14 +271,19 @@ def get_full_args(self, runtime: Runtime) -> List[Union[str, Path]]:
elif isinstance(runtime, SpiderMonkey):
pass
else:
raise TypeError("{} is of type {}, and not a valid runtime for JavaScriptBenchmark".format(
runtime, type(runtime)))
raise TypeError(
"{} is of type {}, and not a valid runtime for JavaScriptBenchmark".format(
runtime, type(runtime)
)
)
cmd.extend(self.program_args)
return cmd


class JuliaBenchmark(Benchmark):
def __init__(self, julia_args: List[str], suite_path: Path, program_args: List[str], **kwargs):
def __init__(
self, julia_args: List[str], suite_path: Path, program_args: List[str], **kwargs
):
super().__init__(**kwargs)
self.julia_args = julia_args
self.suite_path = suite_path
@@ -271,7 +292,7 @@ def __init__(self, julia_args: List[str], suite_path: Path, program_args: List[s
def __str__(self) -> str:
return self.to_string(DummyRuntime("julia"))

def attach_modifiers(self, modifiers: Sequence[Modifier]) -> 'JuliaBenchmark':
def attach_modifiers(self, modifiers: Sequence[Modifier]) -> "JuliaBenchmark":
jb = super().attach_modifiers(modifiers)
for m in modifiers:
if type(m) == JuliaArg:
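`Benchmark.run` above launches an optional companion process before the benchmark, then collects the companion's output in the `finally` block, killing it if it does not exit within ten seconds. The standalone sketch below mirrors that launch/collect/kill pattern with placeholder commands and timeouts; it is not the class's actual code path and omits the `PermissionError` handling and the dry-run branch.

```python
# Standalone sketch of the companion-process pattern in Benchmark.run.
# Commands and timeouts are placeholders chosen for portability.
import subprocess
import sys
from time import sleep

companion_cmd = [
    sys.executable, "-u", "-c",
    "import time\nwhile True: print('companion tick'); time.sleep(0.5)",
]
benchmark_cmd = [sys.executable, "-c", "print('benchmark done')"]

companion = subprocess.Popen(
    companion_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
sleep(1)  # let the companion start first (COMPANION_WAIT_START in running)
try:
    p = subprocess.run(
        benchmark_cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        timeout=30,
    )
    print(p.stdout.decode(errors="replace"), end="")
finally:
    # Collect whatever the companion printed, killing it if it keeps running.
    try:
        companion_out, _ = companion.communicate(timeout=2)
    except subprocess.TimeoutExpired:
        companion.kill()
        companion_out, _ = companion.communicate()
    print(companion_out.decode(errors="replace"), end="")
```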
13 changes: 9 additions & 4 deletions src/running/command/fillin.py
@@ -2,7 +2,11 @@
import subprocess


def fillin(callback: Callable[[int, Iterable[int]], None], levels: int, start: Optional[int] = None):
def fillin(
callback: Callable[[int, Iterable[int]], None],
levels: int,
start: Optional[int] = None,
):
"""Fill the parameter space
The parameter space is from 0, 1, 2, ..., 2^levels (not right-inclusive).
@@ -23,7 +27,7 @@ def fillin(callback: Callable[[int, Iterable[int]], None], levels: int, start: O
"""
commenced = False
if start is None:
callback(2**levels, range(0, 2 ** levels + 1, 2**(levels-1)))
callback(2**levels, range(0, 2**levels + 1, 2 ** (levels - 1)))
commenced = True
i = 1
while i < levels:
@@ -32,7 +36,7 @@ def fillin(callback: Callable[[int, Iterable[int]], None], levels: int, start: O
if start is not None and base == start:
commenced = True
if commenced:
callback(2**levels, range(base, 2 ** levels, step))
callback(2**levels, range(base, 2**levels, step))
i += 1


@@ -43,6 +47,7 @@ def callback(end, ns):
cmd.extend(map(str, ns))
output = subprocess.check_output(cmd)
print(output.decode("utf-8"), end="")

return callback


@@ -51,7 +56,7 @@ def setup_parser(subparsers):
f.set_defaults(which="fillin")
f.add_argument("PROG")
f.add_argument("LEVELS", type=int)
f.add_argument("START", type=int, nargs='?', default=None)
f.add_argument("START", type=int, nargs="?", default=None)


def run(args):
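The docstring of `fillin` above describes covering the parameter space 0 ... 2^levels in progressively finer passes. As a small illustration, the trace below records the arguments a callback would receive for `levels=3`. The expected output assumes the lines elided from this diff halve the stride and offset it by half on each pass, which is what the visible `range(base, 2**levels, step)` call suggests, and it assumes the `running` package is importable.

```python
# Record the (end, ns) arguments fillin passes to its callback for levels=3.
from running.command.fillin import fillin  # assumes running is on the path

calls = []
fillin(lambda end, ns: calls.append((end, list(ns))), levels=3)
print(calls)
# Expected (under the assumption above):
# [(8, [0, 4, 8]), (8, [2, 6]), (8, [1, 3, 5, 7])]
```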
22 changes: 11 additions & 11 deletions src/running/command/genadvice.py
@@ -12,8 +12,12 @@
advice_folder = sys.argv[1]


JikesRVM_HEADER = "============================ MMTk Statistics Totals ============================"
JikesRVM_FOOTER = "------------------------------ End MMTk Statistics -----------------------------"
JikesRVM_HEADER = (
"============================ MMTk Statistics Totals ============================"
)
JikesRVM_FOOTER = (
"------------------------------ End MMTk Statistics -----------------------------"
)


def extract_blocks(lines, header, footer):
@@ -33,7 +37,7 @@ def extract_blocks(lines, header, footer):


def cleanse(filename):
sed_pattern = 's/{urls[^}]*}//g'
sed_pattern = "s/{urls[^}]*}//g"
if sys.platform == "linux" or sys.platform == "linux2":
cmd = ["sed", "-i", sed_pattern, filename]
elif sys.platform == "darwin":
@@ -46,11 +50,9 @@ def select_best_invocation(scenario):
filename = "{}.log.gz".format(scenario)
metrics = []
with gzip.open(os.path.join(advice_folder, filename)) as log_file:
stats_blocks = extract_blocks(
log_file, JikesRVM_HEADER, JikesRVM_FOOTER)
stats_blocks = extract_blocks(log_file, JikesRVM_HEADER, JikesRVM_FOOTER)
for stats_block in stats_blocks:
stats = dict(zip(stats_block[0].split(
"\t"), stats_block[1].split("\t")))
stats = dict(zip(stats_block[0].split("\t"), stats_block[1].split("\t")))
metrics.append(float(stats["time.gc"]) + float(stats["time.mu"]))
if not metrics:
print("No metric is found")
@@ -76,14 +78,12 @@ def select_advice_file(scenario, best_invocation):

def main():
scenario_logs = glob.glob(os.path.join(advice_folder, "*.log.gz"))
scenarios = [os.path.basename(s).replace(".log.gz", "")
for s in scenario_logs]
scenarios = [os.path.basename(s).replace(".log.gz", "") for s in scenario_logs]
print("Found scenarios {}".format(scenarios))
for scenario in scenarios:
print("Processing scenario {}".format(scenario))
best_invocation = select_best_invocation(scenario)
print("Best invocation for scenario {} is {}".format(scenario,
best_invocation))
print("Best invocation for scenario {} is {}".format(scenario, best_invocation))
select_advice_file(scenario, best_invocation)


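`select_best_invocation` above pairs the tab-separated header and value rows of each MMTk statistics block and scores every invocation by `time.gc + time.mu`. The sketch below replays just that scoring step on synthetic blocks and picks the smallest total; the argmin at the end is an assumption about how the best invocation is chosen, since that part of the function is not visible in this diff.

```python
# Synthetic replay of the scoring in select_best_invocation: each block is a
# tab-separated header row plus value row; lower time.gc + time.mu is better.
stats_blocks = [
    ("time.mu\ttime.gc\tother", "900.0\t100.0\t1.0"),
    ("time.mu\ttime.gc\tother", "850.0\t120.0\t1.0"),
]

metrics = []
for header, values in stats_blocks:
    stats = dict(zip(header.split("\t"), values.split("\t")))
    metrics.append(float(stats["time.gc"]) + float(stats["time.mu"]))

best_invocation = metrics.index(min(metrics))  # assumed argmin selection
print(best_invocation)  # 1: the second invocation has the smaller total
```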