 import asyncio
 import concurrent.futures
 import dataclasses
+import json
 import os
+import statistics
 import subprocess
 import time
-
+from deepmerge import Merger
 
 import qubesadmin
 
 
+merger = Merger(
+    [(list, ["override"]), (dict, ["merge"]), (set, ["override"])],
+    ["override"],
+    ["override"],
+)
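+# Illustrative note on the configured strategies (assumed deepmerge
+# behaviour, not measured data): nested dicts are merged recursively while
+# lists, sets and conflicting scalars from the newer data override the
+# older ones, e.g.
+# merger.merge({"t": {"mean": 1.0}}, {"t": {"total": 9.0}})
+# returns {"t": {"mean": 1.0, "total": 9.0}}.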
+
+
 @dataclasses.dataclass
 class TestConfig:
     """
@@ -50,6 +59,7 @@ class TestConfig:
     :param int preload_max: number of disposables to preload
     :param bool non_dispvm: target a non disposable qube
     :param bool admin_api: use the Admin API directly
+    :param str extra_id: base test that the extra ID varies from
 
     Notes
     -----
@@ -74,8 +84,8 @@ class TestConfig:
       it is simpler to achieve.
     - Concurrent calls are multiple requests that are done without regards
       to the previous request completion.
-    - Concurrency average time is skewed as there are multiples
-      simultaneous calls.
+    - Concurrency mean time is skewed as there are multiple simultaneous
+      calls.
     Normal VS Preloaded:
     - Improving normal qube startup will shorten preload usage time, but
       the reverse is not true. Normal disposables are a control group for
@@ -91,6 +101,7 @@ class TestConfig:
     preload_max: int = 0
     non_dispvm: bool = False
     admin_api: bool = False
+    extra_id: str = ""
 
 
 POLICY_FILE = "/run/qubes/policy.d/10-test-dispvm-perf.policy"
@@ -179,11 +190,13 @@ class TestConfig:
179190 "dispvm-preload-more-api" ,
180191 preload_max = MAX_PRELOAD + 1 ,
181192 admin_api = True ,
193+ extra_id = "dispvm-preload-api" ,
182194 ),
183195 TestConfig (
184196 "dispvm-preload-less-api" ,
185197 preload_max = MAX_PRELOAD - 1 ,
186198 admin_api = True ,
199+ extra_id = "dispvm-preload-api" ,
187200 ),
188201 TestConfig ("dispvm-preload-api" , preload_max = MAX_PRELOAD , admin_api = True ),
189202 TestConfig (
@@ -396,19 +409,22 @@ def run_latency_api_calls(self, test):
             qube = self.dvm
 
         results = {}
+        results["api_results"] = {}
+        results["api_results"]["iteration"] = {}
+        results["api_results"]["stage"] = {}
         start_time = get_time()
         if test.concurrent:
             all_results = asyncio.run(self.api_thread(test, service, qube))
             for i in range(1, self.iterations + 1):
-                results[i] = all_results[i - 1]
+                results["api_results"]["iteration"][i] = all_results[i - 1]
         else:
             for i in range(1, self.iterations + 1):
-                results[i] = self.call_api(
+                results["api_results"]["iteration"][i] = self.call_api(
                     test=test, service=service, qube=qube
                 )
         end_time = get_time()
 
-        sample_keys = list(results[1].keys())
+        sample_keys = list(results["api_results"]["iteration"][1].keys())
         value_keys = [k for k in sample_keys if k != "total"]
         headers = (
             ["iter"]
@@ -417,7 +433,7 @@ def run_latency_api_calls(self, test):
             + [f"{k}%" for k in value_keys]
         )
         rows = []
-        for key, values in results.items():
+        for key, values in results["api_results"]["iteration"].items():
             total = values.get("total", 0)
             row_values = [str(key)]
             for k in value_keys:
@@ -442,56 +458,63 @@ def run_latency_api_calls(self, test):
442458 " " .join (val .rjust (col_widths [i ]) for i , val in enumerate (row ))
443459 )
444460
461+ values_by_stage = {key : {} for key in sample_keys }
462+ for subdict in results ["api_results" ]["iteration" ].values ():
463+ for key , value in subdict .items ():
464+ values_by_stage [key ].setdefault ("values" , []).append (value )
465+ for key , value in values_by_stage .items ():
466+ values = value ["values" ]
467+ mean = round (statistics .mean (values ), ROUND_PRECISION )
468+ median = round (statistics .median (values ), ROUND_PRECISION )
469+ values_by_stage [key ]["mean" ] = mean
470+ values_by_stage [key ]["median" ] = median
471+ results ["api_results" ]["stage" ].update (values_by_stage )
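+        # Illustration (numbers made up): each stage entry now has the form
+        # {"values": [1.1, 0.9], "mean": 1.0, "median": 1.0} under
+        # results["api_results"]["stage"][<stage key>].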
+
         total_time = round(end_time - start_time, ROUND_PRECISION)
         return total_time, results
 
     def report_result(self, test, result):
-        items = " ".join(
-            "{}={}".format(key, value) for key, value in vars(test).items()
-        )
+        try:
+            template = self.vm1.template.name
+        except AttributeError:
+            template = self.vm1.name
+        data = vars(test)
+        data["template"] = str(template)
         if test.admin_api:
             total_time = result[0]
-            average = round(total_time / self.iterations, ROUND_PRECISION)
-            pretty_average = f"{average:.{ROUND_PRECISION}f}"
-            compiled_result = []
-            for key, value in result[1].items():
-                individual_result = (
-                    f"{key}=("
-                    + ",".join(
-                        f"{k}={v:.{ROUND_PRECISION}f}" for k, v in value.items()
-                    )
-                    + ")"
-                )
-                compiled_result.append(individual_result)
-            items += f" iterations={self.iterations} average={pretty_average}"
-            items += " ".join(compiled_result)
+            data.update(result[1].items())
         else:
             total_time = result
-            average = total_time / self.iterations
-            pretty_average = f"{average:.{ROUND_PRECISION}f}"
-            items += f" iterations={self.iterations} average={pretty_average}"
+        mean = round(total_time / self.iterations, ROUND_PRECISION)
+        data.update(
+            {
+                "iterations": self.iterations,
+                "mean": mean,
+                "total": total_time,
+            }
+        )
+        pretty_mean = f"{mean:.{ROUND_PRECISION}f}"
         pretty_total_time = f"{total_time:.{ROUND_PRECISION}f}"
-        final_result = pretty_total_time + " " + items
         pretty_items = "iterations=" + str(self.iterations)
-        pretty_items += " average=" + pretty_average
+        pretty_items += " mean=" + pretty_mean
         print(f"Run time ({pretty_items}): {pretty_total_time}s")
         results_file = os.environ.get("QUBES_TEST_PERF_FILE")
         if not results_file:
             return
         try:
-            if self.vm2 and self.vm1.template != self.vm2.template:
-                name_prefix = (
-                    f"{self.vm1.template!s}_" f"{self.vm2.template!s}_"
-                )
-            else:
-                name_prefix = f"{self.vm1.template!s}_"
+            name_prefix = f"{template!s}_"
         except AttributeError:
-            if self.vm2:
-                name_prefix = f"{self.vm1!s}_{self.vm2!s}_"
-            else:
-                name_prefix = f"{self.vm1!s}_"
-        with open(results_file, "a", encoding="ascii") as file:
-            file.write(name_prefix + test.name + " " + str(final_result) + "\n")
+            name_prefix = f"{template!s}_"
+        data_final = {}
+        data_final[name_prefix + test.name] = data
+        try:
+            with open(results_file, "r", encoding="ascii") as file:
+                old_data = json.load(file)
+        except (FileNotFoundError, json.JSONDecodeError):
+            old_data = {}
+        data_final = merger.merge(old_data, data_final)
+        with open(results_file, "w", encoding="ascii") as file:
+            json.dump(data_final, file, indent=2)
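+        # Illustration of the file layout written above (values made up):
+        # {"<template>_<test.name>": {"name": "...", "template": "...",
+        #   "iterations": 5, "mean": 1.2, "total": 6.0, ...}}; Admin API
+        # tests additionally carry "api_results" with per-iteration and
+        # per-stage timings, and repeated runs are folded in via merger.merge().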
 
     def run_test(self, test: TestConfig):
         with open(POLICY_FILE, "w", encoding="ascii") as policy: