@@ -30,14 +30,23 @@
 import asyncio
 import concurrent.futures
 import dataclasses
+import json
 import os
+import statistics
 import subprocess
 import time
-
+from deepmerge import Merger
 
 import qubesadmin
 
 
+merger = Merger(
+    [(list, ["override"]), (dict, ["merge"]), (set, ["override"])],
+    ["override"],
+    ["override"],
+)
+
+
 @dataclasses.dataclass
 class TestConfig:
     """
@@ -50,6 +59,7 @@ class TestConfig:
     :param int preload_max: number of disposables to preload
     :param bool non_dispvm: target a non disposable qube
     :param bool admin_api: use the Admin API directly
+    :param str extra_id: base test that this test varies from
 
     Notes
     -----
@@ -74,8 +84,8 @@ class TestConfig:
       it is simpler to achieve.
     - Concurrent calls are multiple requests that are done without regards
       to the previous request completion.
-    - Concurrency average time is skewed as there are multiples
-      simultaneous calls.
+    - Concurrency mean time is skewed as there are multiple simultaneous
+      calls.
     Normal VS Preloaded:
     - Improving normal qube startup will shorten preload usage time, but
       the reverse is not true. Normal disposables are a control group for
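One way to read the skew noted above: in the extreme case where five simultaneous calls are served one after another at 1 s each, they report latencies of roughly 1, 2, 3, 4 and 5 s, a per-call mean of 3 s, even though a single call only takes about 1 s; per-iteration means from concurrent runs are therefore not directly comparable with sequential ones.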
@@ -91,6 +101,7 @@ class TestConfig:
     preload_max: int = 0
     non_dispvm: bool = False
     admin_api: bool = False
+    extra_id: str = ""
 
 
 POLICY_FILE = "/run/qubes/policy.d/10-test-dispvm-perf.policy"
@@ -179,11 +190,13 @@ class TestConfig:
         "dispvm-preload-more-api",
         preload_max=MAX_PRELOAD + 1,
         admin_api=True,
+        extra_id="dispvm-preload-api",
     ),
     TestConfig(
         "dispvm-preload-less-api",
         preload_max=MAX_PRELOAD - 1,
         admin_api=True,
+        extra_id="dispvm-preload-api",
     ),
     TestConfig("dispvm-preload-api", preload_max=MAX_PRELOAD, admin_api=True),
     TestConfig(
@@ -396,19 +409,22 @@ def run_latency_api_calls(self, test):
             qube = self.dvm
 
         results = {}
+        results["api_results"] = {}
+        results["api_results"]["iteration"] = {}
+        results["api_results"]["stage"] = {}
         start_time = get_time()
         if test.concurrent:
             all_results = asyncio.run(self.api_thread(test, service, qube))
             for i in range(1, self.iterations + 1):
-                results[i] = all_results[i - 1]
+                results["api_results"]["iteration"][i] = all_results[i - 1]
         else:
             for i in range(1, self.iterations + 1):
-                results[i] = self.call_api(
+                results["api_results"]["iteration"][i] = self.call_api(
                     test=test, service=service, qube=qube
                 )
         end_time = get_time()
 
-        sample_keys = list(results[1].keys())
+        sample_keys = list(results["api_results"]["iteration"][1].keys())
         value_keys = [k for k in sample_keys if k != "total"]
         headers = (
             ["iter"]
@@ -417,7 +433,7 @@ def run_latency_api_calls(self, test):
             + [f"{k}%" for k in value_keys]
         )
         rows = []
-        for key, values in results.items():
+        for key, values in results["api_results"]["iteration"].items():
             total = values.get("total", 0)
             row_values = [str(key)]
             for k in value_keys:
@@ -442,56 +458,63 @@ def run_latency_api_calls(self, test):
                 " ".join(val.rjust(col_widths[i]) for i, val in enumerate(row))
             )
 
+        values_by_stage = {key: {} for key in sample_keys}
+        for subdict in results["api_results"]["iteration"].values():
+            for key, value in subdict.items():
+                values_by_stage[key].setdefault("values", []).append(value)
+        for key, value in values_by_stage.items():
+            values = value["values"]
+            mean = round(statistics.mean(values), ROUND_PRECISION)
+            median = round(statistics.median(values), ROUND_PRECISION)
+            values_by_stage[key]["mean"] = mean
+            values_by_stage[key]["median"] = median
+        results["api_results"]["stage"].update(values_by_stage)
+
         total_time = round(end_time - start_time, ROUND_PRECISION)
         return total_time, results
 
     def report_result(self, test, result):
-        items = " ".join(
-            "{}={}".format(key, value) for key, value in vars(test).items()
-        )
+        try:
+            template = self.vm1.template.name
+        except AttributeError:
+            template = self.vm1.name
+        data = vars(test)
+        data["template"] = str(template)
         if test.admin_api:
             total_time = result[0]
-            average = round(total_time / self.iterations, ROUND_PRECISION)
-            pretty_average = f"{average:.{ROUND_PRECISION}f}"
-            compiled_result = []
-            for key, value in result[1].items():
-                individual_result = (
-                    f"{key}=("
-                    + ",".join(
-                        f"{k}={v:.{ROUND_PRECISION}f}" for k, v in value.items()
-                    )
-                    + ")"
-                )
-                compiled_result.append(individual_result)
-            items += f" iterations={self.iterations} average={pretty_average} "
-            items += " ".join(compiled_result)
+            data.update(result[1].items())
         else:
             total_time = result
-            average = total_time / self.iterations
-            pretty_average = f"{average:.{ROUND_PRECISION}f}"
-            items += f" iterations={self.iterations} average={pretty_average}"
+        mean = round(total_time / self.iterations, ROUND_PRECISION)
+        data.update(
+            {
+                "iterations": self.iterations,
+                "mean": mean,
+                "total_time": total_time,
+            }
+        )
+        pretty_mean = f"{mean:.{ROUND_PRECISION}f}"
         pretty_total_time = f"{total_time:.{ROUND_PRECISION}f}"
-        final_result = pretty_total_time + " " + items
         pretty_items = "iterations=" + str(self.iterations)
-        pretty_items += " average=" + pretty_average
+        pretty_items += " mean=" + pretty_mean
         print(f"Run time ({pretty_items}): {pretty_total_time}s")
         results_file = os.environ.get("QUBES_TEST_PERF_FILE")
         if not results_file:
             return
         try:
-            if self.vm2 and self.vm1.template != self.vm2.template:
-                name_prefix = (
-                    f"{self.vm1.template!s}_" f"{self.vm2.template!s}_"
-                )
-            else:
-                name_prefix = f"{self.vm1.template!s}_"
+            name_prefix = f"{template!s}_"
         except AttributeError:
-            if self.vm2:
-                name_prefix = f"{self.vm1!s}_{self.vm2!s}_"
-            else:
-                name_prefix = f"{self.vm1!s}_"
-        with open(results_file, "a", encoding="ascii") as file:
-            file.write(name_prefix + test.name + " " + str(final_result) + "\n")
+            name_prefix = f"{template!s}_"
+        data_final = {}
+        data_final[name_prefix + test.name] = data
+        try:
+            with open(results_file, "r", encoding="ascii") as file:
+                old_data = json.load(file)
+        except (FileNotFoundError, json.JSONDecodeError):
+            old_data = {}
+        data_final = merger.merge(old_data, data_final)
+        with open(results_file, "w", encoding="ascii") as file:
+            json.dump(data_final, file)
 
     def run_test(self, test: TestConfig):
         with open(POLICY_FILE, "w", encoding="ascii") as policy:
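With this change QUBES_TEST_PERF_FILE is no longer an append-only text log but a JSON document keyed by "<template>_<test name>" and merged across runs by the Merger defined at the top of the file. One entry might look roughly like the sketch below; the template name, numbers and the subset of TestConfig fields shown are illustrative, and json.dump turns the integer iteration keys into strings.

    {
      "fedora-41_dispvm-preload-api": {
        "name": "dispvm-preload-api",
        "admin_api": true,
        "extra_id": "",
        "template": "fedora-41",
        "iterations": 5,
        "mean": 1.12,
        "total_time": 5.6,
        "api_results": {
          "iteration": {"1": {"total": 1.15}, "2": {"total": 1.09}},
          "stage": {"total": {"values": [1.15, 1.09], "mean": 1.12, "median": 1.12}}
        }
      }
    }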