diff --git a/compiler/ci/ci_common/benchmark-suites.libsonnet b/compiler/ci/ci_common/benchmark-suites.libsonnet
index 033c897b86fc..2ea20d4f134a 100644
--- a/compiler/ci/ci_common/benchmark-suites.libsonnet
+++ b/compiler/ci/ci_common/benchmark-suites.libsonnet
@@ -172,9 +172,6 @@
     run+: [
       self.benchmark_cmd + ["specjvm2008:*", "--"] + self.extra_vm_args + ["--", "-ikv", "-it", "240s", "-wt", "120s"]
     ],
-    teardown+: [
-      ["rm", "-r", "${SPECJVM2008}/results"]
-    ],
     timelimit: "3:00:00",
     forks_batches:: 5,
     forks_timelimit:: "06:00:00",
diff --git a/java-benchmarks/mx.java-benchmarks/mx_java_benchmarks.py b/java-benchmarks/mx.java-benchmarks/mx_java_benchmarks.py
index 7b8bd3eb6d69..e2ef50afe467 100644
--- a/java-benchmarks/mx.java-benchmarks/mx_java_benchmarks.py
+++ b/java-benchmarks/mx.java-benchmarks/mx_java_benchmarks.py
@@ -29,6 +29,7 @@
 import os
 from os.path import join, exists
 import json
+import shutil
 from shutil import rmtree
 from tempfile import mkdtemp, mkstemp
 
@@ -1364,8 +1365,7 @@ def workloadSize(self):
 if 'startup.compiler.compiler' in _allSpecJVM2008BenchesJDK9:
     _allSpecJVM2008BenchesJDK9.remove('startup.compiler.compiler')
 
-
-class SpecJvm2008BenchmarkSuite(mx_benchmark.JavaBenchmarkSuite):
+class SpecJvm2008BenchmarkSuite(mx_benchmark.JavaBenchmarkSuite, mx_benchmark.TemporaryWorkdirMixin):
     """SpecJVM2008 benchmark suite implementation.
 
     This benchmark suite can run multiple benchmarks as part of one VM run.
@@ -1384,11 +1384,19 @@ def specJvmPath(self):
         if specjvm2008 is None:
             mx.abort("Please set the SPECJVM2008 environment variable to a " +
                      "SPECjvm2008 directory.")
-        jarpath = join(specjvm2008, "SPECjvm2008.jar")
+        jarname = "SPECjvm2008.jar"
+        jarpath = join(specjvm2008, jarname)
         if not exists(jarpath):
             mx.abort("The SPECJVM2008 environment variable points to a directory " +
                      "without the SPECjvm2008.jar file.")
-        return jarpath
+
+        # copy to newly-created temporary working directory
+        working_dir_jarpath = os.path.abspath(join(self.workdir, jarname))
+        if not exists(working_dir_jarpath):
+            mx.log("copying " + specjvm2008 + " to " + self.workdir)
+            shutil.copytree(specjvm2008, self.workdir, dirs_exist_ok=True)
+
+        return working_dir_jarpath
 
     def validateEnvironment(self):
         if not self.specJvmPath():
@@ -1398,9 +1406,6 @@
     def validateReturnCode(self, retcode):
         return retcode == 0
 
-    def workingDirectory(self, benchmarks, bmSuiteArgs):
-        return mx.get_env("SPECJVM2008")
-
     def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
         if benchmarks is None:
             # No benchmark specified in the command line, so run everything.
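Aside (not part of the patch): the reworked specJvmPath() above copies the whole SPECjvm2008 distribution into the suite's temporary working directory, so each run writes its results there instead of into the shared $SPECJVM2008 installation, which is presumably why the teardown step that deleted ${SPECJVM2008}/results is dropped from the CI definition. A minimal standalone sketch of that pattern, assuming Python 3.8+ for dirs_exist_ok; the helper name and paths are hypothetical:

    import os
    import shutil
    import tempfile
    from os.path import join, exists

    def stage_into_workdir(dist_dir, jarname="SPECjvm2008.jar"):
        # throwaway working directory for a single benchmark run
        workdir = tempfile.mkdtemp(prefix="specjvm2008.")
        staged_jar = os.path.abspath(join(workdir, jarname))
        if not exists(staged_jar):
            # copy the distribution so results/ and logs land in the workdir,
            # leaving the pristine installation untouched
            shutil.copytree(dist_dir, workdir, dirs_exist_ok=True)
        return workdir, staged_jar
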
diff --git a/substratevm/mx.substratevm/mx_substratevm_benchmark.py b/substratevm/mx.substratevm/mx_substratevm_benchmark.py
index c00124b38943..b42982dca4f8 100644
--- a/substratevm/mx.substratevm/mx_substratevm_benchmark.py
+++ b/substratevm/mx.substratevm/mx_substratevm_benchmark.py
@@ -627,6 +627,10 @@ class SpecJVM2008NativeImageBenchmarkSuite(mx_java_benchmarks.SpecJvm2008Benchma
     """
     SpecJVM2008 for Native Image
     """
+    # disables formatted report generation since chart generation with JFreeChart loads fonts from disk (from java.home) to compute string width
+    disable_rendered_report = ["-ctf", "false", "-chf", "false"]
+    short_run_args = disable_rendered_report + ["-wt", "1", "-it", "1", "-ikv"]
+    long_run_args = disable_rendered_report + ["-wt", "10", "-it", "5", "-ikv"]
 
     def name(self):
         return 'specjvm2008-native-image'
@@ -635,7 +639,8 @@ def benchSuiteName(self, bmSuiteArgs=None):
         return 'specjvm2008'
 
     def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
-        args = super(SpecJVM2008NativeImageBenchmarkSuite, self).createCommandLineArgs(benchmarks, bmSuiteArgs)
+        args = super().createCommandLineArgs(benchmarks, bmSuiteArgs)
+
         if benchmarks is None:
             mx.abort("Suite can only run a single benchmark per VM instance.")
         elif len(benchmarks) != 1:
@@ -644,8 +649,21 @@ def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
         self.benchmark_name = benchmarks[0]
         return args
 
+    def extra_agent_run_arg(self, benchmark, args, image_run_args):
+        return super().extra_agent_run_arg(benchmark, args, image_run_args) + SpecJVM2008NativeImageBenchmarkSuite.short_run_args
+
+    def extra_profile_run_arg(self, benchmark, args, image_run_args, should_strip_run_args):
+        return super().extra_profile_run_arg(benchmark, args, image_run_args, should_strip_run_args) + SpecJVM2008NativeImageBenchmarkSuite.short_run_args
+
     def extra_image_build_argument(self, benchmark, args):
-        return super(SpecJVM2008NativeImageBenchmarkSuite, self).extra_image_build_argument(benchmark, args) + mx_sdk_vm_impl.svm_experimental_options(['-H:-ParseRuntimeOptions']) + ['-Djava.awt.headless=false']
+        # Don't wrap the option `-H:-ParseRuntimeOptions` with `mx_sdk_vm_impl.svm_experimental_options`, as all args are wrapped already.
+        # The reason to add `-H:CompilationExpirationPeriod` is that we encounter non-deterministic compiler crash due to expiration (GR-50701).
+        return super().extra_image_build_argument(benchmark, args) + ['-H:-ParseRuntimeOptions', '-H:CompilationExpirationPeriod=600']
+
+    def extra_run_arg(self, benchmark, args, image_run_args):
+        return super().extra_run_arg(benchmark, args, image_run_args) + SpecJVM2008NativeImageBenchmarkSuite.long_run_args
+    def successPatterns(self):
+        return super().successPatterns() + [_successful_stage_pattern]
 
 
 mx_benchmark.add_bm_suite(SpecJVM2008NativeImageBenchmarkSuite())
diff --git a/vm/mx.vm/mx_vm_benchmark.py b/vm/mx.vm/mx_vm_benchmark.py
index 5825de2bdbc1..1f21dd5d52bf 100644
--- a/vm/mx.vm/mx_vm_benchmark.py
+++ b/vm/mx.vm/mx_vm_benchmark.py
@@ -928,11 +928,11 @@ def run_stage_agent(self, config, stages):
                     if file.endswith(".json"):
                         zipf.write(os.path.join(root, file), os.path.relpath(os.path.join(root, file), os.path.join(path, '..')))
 
-    def run_stage_instrument_image(self, config, stages, out, i, instrumentation_image_name, image_path, image_path_latest, instrumented_iterations):
+    def run_stage_instrument_image(self, config, stages, out, i, instrumentation_image_name, image_path, image_path_latest, instrumented_iterations, profile_path):
         executable_name_args = ['-o', instrumentation_image_name]
         pgo_args = ['--pgo=' + config.latest_profile_path]
         pgo_args += svm_experimental_options(['-H:' + ('+' if self.pgo_context_sensitive else '-') + 'PGOContextSensitivityEnabled'])
-        instrument_args = ['--pgo-instrument'] + ([] if i == 0 else pgo_args)
+        instrument_args = ['--pgo-instrument', '-R:ProfilesDumpFile=' + profile_path] + ([] if i == 0 else pgo_args)
         if self.jdk_profiles_collect:
             instrument_args += svm_experimental_options(['-H:+ProfilingEnabled', '-H:+AOTPriorityInline', '-H:-SamplingCollect', f'-H:ProfilingPackagePrefixes={self.generate_profiling_package_prefixes()}'])
@@ -963,7 +963,7 @@ def _ensureSamplesAreInProfile(self, profile_path):
         assert sample["records"][0] > 0, "Sampling profiles seem to have a 0 in records in file " + profile_path
 
     def run_stage_instrument_run(self, config, stages, image_path, profile_path):
-        image_run_cmd = [image_path, '-XX:ProfilesDumpFile=' + profile_path]
+        image_run_cmd = [image_path]
         image_run_cmd += config.extra_jvm_args
         image_run_cmd += config.extra_profile_run_args
         with stages.set_command(image_run_cmd) as s:
@@ -1080,7 +1080,7 @@ def run_java(self, args, out=None, err=None, cwd=None, nonZeroIsFatal=False):
             image_path = os.path.join(config.output_dir, instrumentation_image_name)
             image_path_latest = os.path.join(config.output_dir, instrumentation_image_latest)
             if stages.change_stage('instrument-image', str(i)):
-                self.run_stage_instrument_image(config, stages, out, i, instrumentation_image_name, image_path, image_path_latest, instrumented_iterations)
+                self.run_stage_instrument_image(config, stages, out, i, instrumentation_image_name, image_path, image_path_latest, instrumented_iterations, profile_path)
             if stages.change_stage('instrument-run', str(i)):
                 self.run_stage_instrument_run(config, stages, image_path, profile_path)
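Aside (not part of the patch): the last two hunks move the profile dump location from the instrumented image's run command (-XX:ProfilesDumpFile=...) to the image build (-R:ProfilesDumpFile=..., a runtime option default baked in at build time), so run_stage_instrument_image now takes the profile_path while run_stage_instrument_run only appends the configured extra arguments. A minimal sketch of the resulting command construction, with hypothetical helper names:

    def build_instrument_image_args(profile_path, pgo_args, i):
        # -R: options become runtime defaults of the produced image
        instrument_args = ['--pgo-instrument', '-R:ProfilesDumpFile=' + profile_path]
        return instrument_args + ([] if i == 0 else pgo_args)

    def build_instrument_run_cmd(image_path, extra_jvm_args, extra_profile_run_args):
        # no -XX:ProfilesDumpFile here anymore; the image already knows where to dump
        return [image_path] + list(extra_jvm_args) + list(extra_profile_run_args)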