Skip to content

Commit 707cf19

Browse files
akolicelkorchi
authored and committed
Add first CI barista benchmark jobs.
(cherry picked from commit 9b3a5d7)
1 parent 79a3763 commit 707cf19

File tree

4 files changed

+113
-27
lines changed

4 files changed

+113
-27
lines changed

compiler/ci/ci_common/benchmark-builders.jsonnet

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.dacapo + PR_bench_libgraal,
1515
c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.scala_dacapo + PR_bench_libgraal,
1616
c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.renaissance + PR_bench_libgraal,
17+
c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.barista,
1718
c.daily + c.opt_post_merge + hw.e3 + jdk + cc.libgraal + bench.specjvm2008 + PR_bench_libgraal,
1819
c.on_demand + hw.e3 + jdk + cc.libgraal + bench.dacapo_size_variants,
1920
c.on_demand + hw.e3 + jdk + cc.libgraal + bench.scala_dacapo_size_variants,

compiler/ci/ci_common/benchmark-suites.libsonnet

Lines changed: 42 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88

99
// convenient sets of benchmark suites for easy reuse
1010
groups:: {
11-
open_suites:: unique_suites([$.awfy, $.dacapo, $.scala_dacapo, $.renaissance]),
11+
open_suites:: unique_suites([$.awfy, $.dacapo, $.scala_dacapo, $.renaissance, $.barista]),
1212
spec_suites:: unique_suites([$.specjvm2008, $.specjbb2015]),
1313
jmh_micros_suites:: unique_suites([$.micros_graal_dist]),
1414
graal_internals_suites:: unique_suites([$.micros_graal_whitebox]),
@@ -112,6 +112,47 @@
112112

113113
renaissance: self.renaissance_template(),
114114

115+
barista_template(suite_version=null, suite_name="barista", max_jdk_version=null, cmd_app_prefix=["hwloc-bind --cpubind node:0.core:0-3.pu:0 --membind node:0"], non_prefix_barista_args=[]):: cc.compiler_benchmark + {
116+
suite:: suite_name,
117+
local barista_version = "v0.2.0",
118+
local suite_version_args = if suite_version != null then ["--bench-suite-version=" + suite_version] else [],
119+
local prefix_barista_arg = if std.length(cmd_app_prefix) > 0 then [std.format("--cmd-app-prefix=%s", std.join(" ", cmd_app_prefix))] else [],
120+
local all_barista_args = prefix_barista_arg + non_prefix_barista_args,
121+
local barista_args_with_separator = if std.length(all_barista_args) > 0 then ["--"] + all_barista_args else [],
122+
downloads+: {
123+
"WRK": { "name": "wrk", "version": "a211dd5", platformspecific: true},
124+
"WRK2": { "name": "wrk2", "version": "2.1", platformspecific: true},
125+
"BARISTA_BENCHMARKS": { "name": "barista", "version": "0.2.0"}
126+
},
127+
packages+: {
128+
maven: "==3.8.6",
129+
"pip:toml": "==0.10.2"
130+
},
131+
setup: [
132+
["set-export", "PATH", "$WRK:$PATH"],
133+
["set-export", "PATH", "$WRK2:$PATH"],
134+
["git", "clone", "--depth", "1", "--branch", barista_version, ["mx", "urlrewrite", "https://github.com/graalvm/barista-suite.git"], "$BARISTA_HOME"],
135+
["cp", "-r", "$BARISTA_BENCHMARKS/*", "$BARISTA_HOME"] // copy the prebuilt jar/nib files
136+
] + super.setup,
137+
run+: [
138+
self.benchmark_cmd + ["barista:*"] + suite_version_args + ["--"] + self.extra_vm_args + barista_args_with_separator
139+
],
140+
notify_emails+: ["[email protected]"],
141+
timelimit: "1:20:00",
142+
should_use_hwloc: false, // hwloc-bind is passed to barista with '--cmd-app-prefix'
143+
environment+: {
144+
BARISTA_HOME: "$BUILD_DIR/barista-suite",
145+
XMX: "500m"
146+
},
147+
min_jdk_version:: 8,
148+
max_jdk_version:: max_jdk_version,
149+
forks_batches:: 3,
150+
bench_forks_per_batch:: 4,
151+
forks_timelimit:: "3:30:00"
152+
},
153+
154+
barista: self.barista_template(),
155+
115156
specjbb2015: cc.compiler_benchmark + c.heap.large_with_large_young_gen + bc.bench_max_threads + {
116157
suite:: "specjbb2015",
117158
downloads+: {

sdk/mx.sdk/mx_sdk_benchmark.py

Lines changed: 62 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1187,7 +1187,21 @@ def rules(self, out, benchmarks, bmSuiteArgs):
11871187

11881188

11891189
_baristaConfig = {
1190-
"benchmarks": ["micronaut-hello-world", "micronaut-shopcart", "micronaut-similarity", "quarkus-hello-world", "quarkus-tika", "spring-hello-world", "spring-petclinic"],
1190+
"benchmarks": {
1191+
"micronaut-hello-world": {},
1192+
"micronaut-shopcart": {},
1193+
"micronaut-similarity": {},
1194+
"quarkus-hello-world": {},
1195+
"quarkus-tika-odt": {
1196+
"barista-bench-name": "quarkus-tika",
1197+
},
1198+
"quarkus-tika-pdf": {
1199+
"barista-bench-name": "quarkus-tika",
1200+
"workload": "pdf-workload.barista.json",
1201+
},
1202+
"spring-hello-world": {},
1203+
"spring-petclinic": {},
1204+
},
11911205
"latency_percentiles": [50.0, 75.0, 90.0, 99.0, 99.9, 99.99, 99.999, 100.0],
11921206
"rss_percentiles": [100, 99, 98, 97, 96, 95, 90, 75, 50, 25],
11931207
"disable_trackers": [mx_benchmark.RssTracker, mx_benchmark.PsrecordTracker, mx_benchmark.PsrecordMaxrssTracker, mx_benchmark.RssPercentilesTracker, mx_benchmark.RssPercentilesAndMaxTracker],
@@ -1215,16 +1229,27 @@ def context(self):
12151229
def context(self, value):
12161230
self._context = value
12171231

1232+
def readBaristaVersionFromPyproject(self):
1233+
# tomllib was included in python standard library with version 3.11
1234+
try:
1235+
import tomllib
1236+
with open(self.baristaProjectConfigurationPath(), mode="rb") as pyproject:
1237+
return tomllib.load(pyproject)["project"]["version"]
1238+
except ImportError:
1239+
pass
1240+
1241+
# fallback to 'toml' library if tomllib is not present
1242+
try:
1243+
import toml
1244+
with open(self.baristaProjectConfigurationPath(), mode="rt") as pyproject:
1245+
return toml.loads(pyproject.read())["project"]["version"]
1246+
except ImportError:
1247+
mx.warn("Could not read the Barista version from the project's `pyproject.toml` file because there is no toml parser installed. Use python3.11+ or install `toml` with pip.")
1248+
return self.defaultSuiteVersion()
1249+
12181250
def version(self):
12191251
if self._version is None:
1220-
# tomllib was included in python standard library with version 3.11
1221-
try:
1222-
import tomllib
1223-
with open(self.baristaProjectConfigurationPath(), mode="rb") as pyproject:
1224-
self._version = tomllib.load(pyproject)["project"]["version"]
1225-
except ImportError:
1226-
mx.warn("Could not read the Barista version from the project's `pyproject.toml` file because the `tomllib` python module is not installed. Use python3.11+ or install `tomllib` with pip.")
1227-
self._version = self.defaultSuiteVersion()
1252+
self._version = self.readBaristaVersionFromPyproject()
12281253
return self._version
12291254

12301255
def name(self):
@@ -1240,7 +1265,7 @@ def benchmarkList(self, bmSuiteArgs):
12401265
return self.completeBenchmarkList(bmSuiteArgs)
12411266

12421267
def completeBenchmarkList(self, bmSuiteArgs):
1243-
return _baristaConfig["benchmarks"]
1268+
return _baristaConfig["benchmarks"].keys()
12441269

12451270
def baristaDirectoryPath(self):
12461271
barista_home = mx.get_env("BARISTA_HOME")
@@ -1266,6 +1291,12 @@ def baristaBuilderPath(self):
12661291
def baristaHarnessPath(self):
12671292
return self.baristaFilePath("barista")
12681293

1294+
def baristaHarnessBenchmarkName(self):
1295+
return _baristaConfig["benchmarks"][self.context.benchmark].get("barista-bench-name", self.context.benchmark)
1296+
1297+
def baristaHarnessBenchmarkWorkload(self):
1298+
return _baristaConfig["benchmarks"][self.context.benchmark].get("workload")
1299+
12691300
def validateEnvironment(self):
12701301
self.baristaProjectConfigurationPath()
12711302
self.baristaHarnessPath()
@@ -1290,7 +1321,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
12901321

12911322
# Startup
12921323
all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
1293-
"benchmark": ("<benchmark>", str),
1324+
"benchmark": self.context.benchmark,
12941325
"metric.name": "request-time",
12951326
"metric.type": "numeric",
12961327
"metric.unit": "ms",
@@ -1299,11 +1330,11 @@ def rules(self, out, benchmarks, bmSuiteArgs):
12991330
"metric.iteration": ("<startup.measurements.iteration>", int),
13001331
"load-tester.id": ("<startup.id>", str),
13011332
"load-tester.method-type": "requests"
1302-
}, ["benchmark", "startup.id", "startup.measurements.iteration", "startup.measurements.response_time"]))
1333+
}, ["startup.id", "startup.measurements.iteration", "startup.measurements.response_time"]))
13031334

13041335
# Warmup
13051336
all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
1306-
"benchmark": ("<benchmark>", str),
1337+
"benchmark": self.context.benchmark,
13071338
"metric.name": "warmup",
13081339
"metric.type": "numeric",
13091340
"metric.unit": "op/s",
@@ -1312,11 +1343,11 @@ def rules(self, out, benchmarks, bmSuiteArgs):
13121343
"metric.iteration": ("<warmup.measurements.iteration>", int),
13131344
"load-tester.id": ("<warmup.id>", str),
13141345
"load-tester.command": ("<warmup.measurements.command>", str)
1315-
}, ["benchmark", "warmup.id", "warmup.measurements.iteration", "warmup.measurements.throughput", "warmup.measurements.command"]))
1346+
}, ["warmup.id", "warmup.measurements.iteration", "warmup.measurements.throughput", "warmup.measurements.command"]))
13161347

13171348
# Throughput
13181349
all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
1319-
"benchmark": ("<benchmark>", str),
1350+
"benchmark": self.context.benchmark,
13201351
"metric.name": "throughput",
13211352
"metric.type": "numeric",
13221353
"metric.unit": "op/s",
@@ -1325,11 +1356,11 @@ def rules(self, out, benchmarks, bmSuiteArgs):
13251356
"metric.iteration": ("<throughput.measurements.iteration>", int),
13261357
"load-tester.id": ("<throughput.id>", str),
13271358
"load-tester.command": ("<throughput.measurements.command>", str)
1328-
}, ["benchmark", "throughput.id", "throughput.measurements.iteration", "throughput.measurements.throughput", "throughput.measurements.command"]))
1359+
}, ["throughput.id", "throughput.measurements.iteration", "throughput.measurements.throughput", "throughput.measurements.command"]))
13291360

13301361
# Latency
13311362
all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
1332-
"benchmark": ("<benchmark>", str),
1363+
"benchmark": self.context.benchmark,
13331364
"metric.name": "latency",
13341365
"metric.type": "numeric",
13351366
"metric.unit": "ms",
@@ -1340,7 +1371,6 @@ def rules(self, out, benchmarks, bmSuiteArgs):
13401371
"load-tester.id": ("<latency__id>", str),
13411372
"load-tester.command": ("<latency__measurements__final_measurements__command>", str)
13421373
}, [
1343-
"benchmark",
13441374
"latency__id",
13451375
"latency__measurements__final_measurements__iteration",
13461376
f"latency__measurements__final_measurements__p_values__{float(percentile)}",
@@ -1349,24 +1379,26 @@ def rules(self, out, benchmarks, bmSuiteArgs):
13491379

13501380
# Resource Usage
13511381
all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
1352-
"benchmark": ("<benchmark>", str),
1382+
"benchmark": self.context.benchmark,
13531383
"metric.name": "rss",
13541384
"metric.type": "numeric",
13551385
"metric.unit": "MB",
13561386
"metric.value": (f"<resource_usage__rss__p{float(percentile)}>", float),
13571387
"metric.percentile": float(percentile),
13581388
"metric.better": "lower",
13591389
}, [
1360-
"benchmark", f"resource_usage__rss__p{float(percentile)}"
1390+
f"resource_usage__rss__p{float(percentile)}"
13611391
], indexer_str="__") for percentile in _baristaConfig["rss_percentiles"]]
1392+
# Ensure we are reporting the analogous numbers across suites (p99 at the time of writing this comment)
1393+
percentile_to_copy_into_max_rss = float(mx_benchmark.RssPercentilesTracker.MaxRssCopyRule.percentile_to_copy_into_max_rss)
13621394
all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
1363-
"benchmark": ("<benchmark>", str),
1395+
"benchmark": self.context.benchmark,
13641396
"metric.name": "max-rss",
13651397
"metric.type": "numeric",
13661398
"metric.unit": "MB",
1367-
"metric.value": ("<resource_usage__rss__p100.0>", float),
1399+
"metric.value": (f"<resource_usage__rss__p{percentile_to_copy_into_max_rss}>", float),
13681400
"metric.better": "lower",
1369-
}, ["benchmark", f"resource_usage__rss__p100.0"], indexer_str="__"))
1401+
}, [f"resource_usage__rss__p{percentile_to_copy_into_max_rss}"], indexer_str="__"))
13701402

13711403
return all_rules
13721404

@@ -1467,16 +1499,22 @@ def produceHarnessCommand(self, cmd, suite):
14671499
if mode_match:
14681500
raise ValueError(f"You should not set the Barista '--mode' option manually! Found '{mode_match.group(0)}' in the run arguments!")
14691501

1502+
# Get bench name and workload to use in the barista harness - we might have custom named benchmarks that need to be mapped
1503+
barista_bench_name = suite.baristaHarnessBenchmarkName()
1504+
barista_workload = suite.baristaHarnessBenchmarkWorkload()
1505+
14701506
# Construct the Barista command
14711507
barista_cmd = [suite.baristaHarnessPath()]
14721508
barista_cmd.append(f"--java-home={java_exe_match.group(1)}")
1509+
if barista_workload is not None:
1510+
barista_cmd.append(f"--config={barista_workload}")
14731511
barista_cmd += run_args
14741512
if jvm_vm_options:
14751513
self._updateCommandOption(barista_cmd, "--vm-options", "-v", " ".join(jvm_vm_options))
14761514
if jvm_cmd_prefix:
14771515
self._updateCommandOption(barista_cmd, "--cmd-app-prefix", "-p", " ".join(jvm_cmd_prefix))
14781516
barista_cmd += ["--mode", "jvm"]
1479-
barista_cmd.append(suite.context.benchmark)
1517+
barista_cmd.append(barista_bench_name)
14801518
return barista_cmd
14811519

14821520
class RuntimeContext():

substratevm/mx.substratevm/mx_substratevm_benchmark.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -295,7 +295,7 @@ def application_nib(self):
295295
if self.benchmarkName() not in self._application_nibs:
296296
# Run subprocess retrieving the application nib from the Barista 'build' script
297297
out = mx.OutputCapture()
298-
mx.run([self.baristaBuilderPath(), "--get-nib", self.benchmarkName()], out=out)
298+
mx.run([self.baristaBuilderPath(), "--get-nib", self.baristaHarnessBenchmarkName()], out=out)
299299
# Capture the application nib from the Barista 'build' script output
300300
nib_pattern = r"application nib file path is: ([^\n]+)\n"
301301
nib_match = re.search(nib_pattern, out.data)
@@ -435,8 +435,14 @@ def produceHarnessCommand(self, cmd, suite):
435435
nivm_cmd_prefix = cmd[:index_of_app_image]
436436
nivm_app_options = cmd[index_of_app_image + 1:]
437437

438+
# Get bench name and workload to use in the barista harness - we might have custom named benchmarks that need to be mapped
439+
barista_bench_name = suite.baristaHarnessBenchmarkName()
440+
barista_workload = suite.baristaHarnessBenchmarkWorkload()
441+
438442
# Provide image built in the previous stage to the Barista harnesss using the `--app-executable` option
439443
ni_barista_cmd = [suite.baristaHarnessPath(), "--mode", "native", "--app-executable", app_image]
444+
if barista_workload is not None:
445+
ni_barista_cmd.append(f"--config={barista_workload}")
440446
ni_barista_cmd += suite.runArgs(suite.context.bmSuiteArgs)
441447
ni_barista_cmd += mx_sdk_benchmark.parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", suite.context.bmSuiteArgs)
442448
if stage == mx_sdk_benchmark.Stage.INSTRUMENT_RUN:
@@ -451,7 +457,7 @@ def produceHarnessCommand(self, cmd, suite):
451457
self._updateCommandOption(ni_barista_cmd, "--cmd-app-prefix", "-p", " ".join(nivm_cmd_prefix))
452458
if nivm_app_options:
453459
self._updateCommandOption(ni_barista_cmd, "--app-args", "-a", " ".join(nivm_app_options))
454-
ni_barista_cmd += [suite.benchmarkName()]
460+
ni_barista_cmd += [barista_bench_name]
455461
return ni_barista_cmd
456462

457463

0 commit comments

Comments
 (0)