@@ -1187,7 +1187,21 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
 
 _baristaConfig = {
-    "benchmarks": ["micronaut-hello-world", "micronaut-shopcart", "micronaut-similarity", "quarkus-hello-world", "quarkus-tika", "spring-hello-world", "spring-petclinic"],
+    "benchmarks": {
+        "micronaut-hello-world": {},
+        "micronaut-shopcart": {},
+        "micronaut-similarity": {},
+        "quarkus-hello-world": {},
+        "quarkus-tika-odt": {
+            "barista-bench-name": "quarkus-tika",
+        },
+        "quarkus-tika-pdf": {
+            "barista-bench-name": "quarkus-tika",
+            "workload": "pdf-workload.barista.json",
+        },
+        "spring-hello-world": {},
+        "spring-petclinic": {},
+    },
     "latency_percentiles": [50.0, 75.0, 90.0, 99.0, 99.9, 99.99, 99.999, 100.0],
     "rss_percentiles": [100, 99, 98, 97, 96, 95, 90, 75, 50, 25],
     "disable_trackers": [mx_benchmark.RssTracker, mx_benchmark.PsrecordTracker, mx_benchmark.PsrecordMaxrssTracker, mx_benchmark.RssPercentilesTracker, mx_benchmark.RssPercentilesAndMaxTracker],
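
The `benchmarks` value changes from a flat list to a mapping so that one harness benchmark can back several suite-level benchmarks (e.g. both tika variants run the harness benchmark `quarkus-tika`). A minimal sketch of how an entry resolves, mirroring the `baristaHarnessBenchmarkName`/`baristaHarnessBenchmarkWorkload` accessors added further down (the `resolve` helper is illustrative only, not part of the suite):

    def resolve(benchmark):
        # Fall back to the suite-level name and the default workload when the entry sets nothing.
        entry = _baristaConfig["benchmarks"][benchmark]
        return entry.get("barista-bench-name", benchmark), entry.get("workload")

    resolve("quarkus-tika-pdf")       # -> ("quarkus-tika", "pdf-workload.barista.json")
    resolve("quarkus-tika-odt")       # -> ("quarkus-tika", None), i.e. the default workload
    resolve("micronaut-hello-world")  # -> ("micronaut-hello-world", None)
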
@@ -1215,16 +1229,27 @@ def context(self):
     def context(self, value):
         self._context = value
 
+    def readBaristaVersionFromPyproject(self):
+        # tomllib was added to the Python standard library in version 3.11
+        try:
+            import tomllib
+            with open(self.baristaProjectConfigurationPath(), mode="rb") as pyproject:
+                return tomllib.load(pyproject)["project"]["version"]
+        except ImportError:
+            pass
+
+        # fall back to the 'toml' library if tomllib is not present
+        try:
+            import toml
+            with open(self.baristaProjectConfigurationPath(), mode="rt") as pyproject:
+                return toml.loads(pyproject.read())["project"]["version"]
+        except ImportError:
+            mx.warn("Could not read the Barista version from the project's `pyproject.toml` file because there is no toml parser installed. Use python3.11+ or install `toml` with pip.")
+        return self.defaultSuiteVersion()
+
     def version(self):
         if self._version is None:
-            # tomllib was included in python standard library with version 3.11
-            try:
-                import tomllib
-                with open(self.baristaProjectConfigurationPath(), mode="rb") as pyproject:
-                    self._version = tomllib.load(pyproject)["project"]["version"]
-            except ImportError:
-                mx.warn("Could not read the Barista version from the project's `pyproject.toml` file because the `tomllib` python module is not installed. Use python3.11+ or install `tomllib` with pip.")
-                self._version = self.defaultSuiteVersion()
+            self._version = self.readBaristaVersionFromPyproject()
         return self._version
 
     def name(self):
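
For reference, both parsers return the same nested dictionary; note that `tomllib.load` requires a binary-mode file object while `toml.loads` takes a string, which is why the open modes differ above. A hedged sketch with illustrative file contents:

    # pyproject.toml (contents illustrative):
    #   [project]
    #   name = "barista"
    #   version = "0.1.0"
    import tomllib  # Python 3.11+
    with open("pyproject.toml", mode="rb") as f:  # tomllib only accepts binary-mode files
        version = tomllib.load(f)["project"]["version"]  # -> "0.1.0"
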
@@ -1240,7 +1265,7 @@ def benchmarkList(self, bmSuiteArgs):
         return self.completeBenchmarkList(bmSuiteArgs)
 
     def completeBenchmarkList(self, bmSuiteArgs):
-        return _baristaConfig["benchmarks"]
+        return _baristaConfig["benchmarks"].keys()
 
     def baristaDirectoryPath(self):
         barista_home = mx.get_env("BARISTA_HOME")
@@ -1266,6 +1291,12 @@ def baristaBuilderPath(self):
     def baristaHarnessPath(self):
         return self.baristaFilePath("barista")
 
+    def baristaHarnessBenchmarkName(self):
+        return _baristaConfig["benchmarks"][self.context.benchmark].get("barista-bench-name", self.context.benchmark)
+
+    def baristaHarnessBenchmarkWorkload(self):
+        return _baristaConfig["benchmarks"][self.context.benchmark].get("workload")
+
     def validateEnvironment(self):
         self.baristaProjectConfigurationPath()
         self.baristaHarnessPath()
@@ -1290,7 +1321,7 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
         # Startup
         all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": ("<benchmark>", str),
+            "benchmark": self.context.benchmark,
             "metric.name": "request-time",
             "metric.type": "numeric",
             "metric.unit": "ms",
@@ -1299,11 +1330,11 @@ def rules(self, out, benchmarks, bmSuiteArgs):
             "metric.iteration": ("<startup.measurements.iteration>", int),
             "load-tester.id": ("<startup.id>", str),
             "load-tester.method-type": "requests"
-        }, ["benchmark", "startup.id", "startup.measurements.iteration", "startup.measurements.response_time"]))
+        }, ["startup.id", "startup.measurements.iteration", "startup.measurements.response_time"]))
 
         # Warmup
         all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": ("<benchmark>", str),
+            "benchmark": self.context.benchmark,
             "metric.name": "warmup",
             "metric.type": "numeric",
             "metric.unit": "op/s",
@@ -1312,11 +1343,11 @@ def rules(self, out, benchmarks, bmSuiteArgs):
             "metric.iteration": ("<warmup.measurements.iteration>", int),
             "load-tester.id": ("<warmup.id>", str),
             "load-tester.command": ("<warmup.measurements.command>", str)
-        }, ["benchmark", "warmup.id", "warmup.measurements.iteration", "warmup.measurements.throughput", "warmup.measurements.command"]))
+        }, ["warmup.id", "warmup.measurements.iteration", "warmup.measurements.throughput", "warmup.measurements.command"]))
 
         # Throughput
         all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": ("<benchmark>", str),
+            "benchmark": self.context.benchmark,
             "metric.name": "throughput",
             "metric.type": "numeric",
             "metric.unit": "op/s",
@@ -1325,11 +1356,11 @@ def rules(self, out, benchmarks, bmSuiteArgs):
             "metric.iteration": ("<throughput.measurements.iteration>", int),
             "load-tester.id": ("<throughput.id>", str),
             "load-tester.command": ("<throughput.measurements.command>", str)
-        }, ["benchmark", "throughput.id", "throughput.measurements.iteration", "throughput.measurements.throughput", "throughput.measurements.command"]))
+        }, ["throughput.id", "throughput.measurements.iteration", "throughput.measurements.throughput", "throughput.measurements.command"]))
 
         # Latency
         all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": ("<benchmark>", str),
+            "benchmark": self.context.benchmark,
             "metric.name": "latency",
             "metric.type": "numeric",
             "metric.unit": "ms",
@@ -1340,7 +1371,6 @@ def rules(self, out, benchmarks, bmSuiteArgs):
             "load-tester.id": ("<latency__id>", str),
             "load-tester.command": ("<latency__measurements__final_measurements__command>", str)
         }, [
-            "benchmark",
             "latency__id",
             "latency__measurements__final_measurements__iteration",
             f"latency__measurements__final_measurements__p_values__{float(percentile)}",
@@ -1349,24 +1379,26 @@ def rules(self, out, benchmarks, bmSuiteArgs):
 
         # Resource Usage
         all_rules += [mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": ("<benchmark>", str),
+            "benchmark": self.context.benchmark,
             "metric.name": "rss",
             "metric.type": "numeric",
             "metric.unit": "MB",
             "metric.value": (f"<resource_usage__rss__p{float(percentile)}>", float),
             "metric.percentile": float(percentile),
             "metric.better": "lower",
         }, [
-            "benchmark", f"resource_usage__rss__p{float(percentile)}"
+            f"resource_usage__rss__p{float(percentile)}"
         ], indexer_str="__") for percentile in _baristaConfig["rss_percentiles"]]
+        # Ensure we report analogous numbers across suites (p99 at the time of writing)
+        percentile_to_copy_into_max_rss = float(mx_benchmark.RssPercentilesTracker.MaxRssCopyRule.percentile_to_copy_into_max_rss)
         all_rules.append(mx_benchmark.JsonArrayStdOutFileRule(json_file_pattern, json_file_group_name, {
-            "benchmark": ("<benchmark>", str),
+            "benchmark": self.context.benchmark,
             "metric.name": "max-rss",
             "metric.type": "numeric",
             "metric.unit": "MB",
-            "metric.value": ("<resource_usage__rss__p100.0>", float),
+            "metric.value": (f"<resource_usage__rss__p{percentile_to_copy_into_max_rss}>", float),
             "metric.better": "lower",
-        }, ["benchmark", f"resource_usage__rss__p100.0"], indexer_str="__"))
+        }, [f"resource_usage__rss__p{percentile_to_copy_into_max_rss}"], indexer_str="__"))
 
         return all_rules
 
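
Because a custom-named benchmark such as `quarkus-tika-odt` runs under the harness name `quarkus-tika`, the benchmark dimension can no longer be parsed back out of the harness JSON; the rules therefore stamp the suite-level name as a constant. A sketch of the difference, using the replacement-dict semantics of the rules above (example values illustrative):

    # Before: parsed from the harness output, so it would report the harness name.
    "benchmark": ("<benchmark>", str),    # e.g. "quarkus-tika"
    # After: a plain constant taken from the run context, so the suite-level name is kept.
    "benchmark": self.context.benchmark,  # e.g. "quarkus-tika-odt"
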
@@ -1467,16 +1499,22 @@ def produceHarnessCommand(self, cmd, suite):
         if mode_match:
             raise ValueError(f"You should not set the Barista '--mode' option manually! Found '{mode_match.group(0)}' in the run arguments!")
 
+        # Get the bench name and workload to use in the barista harness - custom-named benchmarks may need to be mapped
+        barista_bench_name = suite.baristaHarnessBenchmarkName()
+        barista_workload = suite.baristaHarnessBenchmarkWorkload()
+
         # Construct the Barista command
         barista_cmd = [suite.baristaHarnessPath()]
         barista_cmd.append(f"--java-home={java_exe_match.group(1)}")
+        if barista_workload is not None:
+            barista_cmd.append(f"--config={barista_workload}")
         barista_cmd += run_args
         if jvm_vm_options:
             self._updateCommandOption(barista_cmd, "--vm-options", "-v", " ".join(jvm_vm_options))
         if jvm_cmd_prefix:
             self._updateCommandOption(barista_cmd, "--cmd-app-prefix", "-p", " ".join(jvm_cmd_prefix))
         barista_cmd += ["--mode", "jvm"]
-        barista_cmd.append(suite.context.benchmark)
+        barista_cmd.append(barista_bench_name)
         return barista_cmd
 
 class RuntimeContext():
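
Put together, a `quarkus-tika-pdf` run would assemble a harness command along these lines (the JDK path and VM options are placeholders, and the exact argument order depends on `_updateCommandOption`):

    ["barista", "--java-home=/path/to/jdk", "--config=pdf-workload.barista.json",
     "--vm-options=-Xmx4g", "--mode", "jvm", "quarkus-tika"]
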