@@ -30,12 +30,14 @@
 import asyncio
 import concurrent.futures
 import dataclasses
-import logging
 import json
+import logging
 import os
 import statistics
 import subprocess
 import time
+import yaml
+from datetime import datetime, timezone
 
 from deepmerge import Merger
 import qubesadmin
@@ -125,8 +127,10 @@ class TestConfig:
 # The preload number is set to MAX_CONCURRENCY on concurrent calls. This number
 # is also used by non preloaded disposables to set the maximum workers/jobs.
 MAX_CONCURRENCY = MAX_PRELOAD * 2
-# How was this amazing algorithm chosen? Yes.
-ITERATIONS = MAX_CONCURRENCY * 4
+# A value that is not so short that it would impact accuracy and not so long
+# that it burns OpenQA time. It is a multiple of MAX_CONCURRENCY so there is
+# no remainder when running with concurrency.
+ITERATIONS = MAX_CONCURRENCY * 3
 # A small round precision excludes noise. It is also used to have 0 padding (as
 # a string) to align fields.
 ROUND_PRECISION = 3
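To make the "no remainder" reasoning concrete, here is a small sketch. MAX_PRELOAD is defined earlier in the file and its real value is not visible in this hunk, so 2 is only an assumed figure for illustration:

    # Sketch only: MAX_PRELOAD is assumed to be 2 here; the real value is
    # defined earlier in the file.
    MAX_PRELOAD = 2
    MAX_CONCURRENCY = MAX_PRELOAD * 2   # 4 workers/jobs
    ITERATIONS = MAX_CONCURRENCY * 3    # 12 iterations
    assert ITERATIONS % MAX_CONCURRENCY == 0  # concurrent batches divide evenly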
@@ -249,6 +253,30 @@ def get_time():
     return time.clock_gettime(time.CLOCK_MONOTONIC)
 
 
+def hcl() -> dict:
+    completed_process = subprocess.run(
+        ["qubes-hcl-report", "--yaml-only"], capture_output=True, check=True
+    )
+    report = yaml.safe_load(completed_process.stdout)
+    data = {
+        "hcl-qubes": report["versions"][0]["qubes"].rstrip(),
+        "hcl-xen": report["versions"][0]["xen"].rstrip(),
+        "hcl-kernel": report["versions"][0]["kernel"].rstrip(),
+        "hcl-memory": int(report["memory"].rstrip()),
+    }
+    if os.environ.get("QUBES_TEST_PERF_HWINFO"):
+        data.update(
+            {
+                "hcl-certified": report["certified"].rstrip() != "no",
+                "hcl-brand": report["brand"].rstrip(),
+                "hcl-model": report["model"].rstrip(),
+                "hcl-bios": report["bios"].rstrip(),
+                "hcl-cpu": report["cpu"].rstrip(),
+            }
+        )
+    return data
+
+
 class TestRun:
     def __init__(self, dom0, dvm, vm1, vm2):
         self.dom0 = dom0
@@ -537,13 +565,52 @@ def report_result(self, test, result):
         else:
             total_time = result
         mean = round(total_time / self.iterations, ROUND_PRECISION)
+
         data.update(
             {
                 "iterations": self.iterations,
                 "mean": mean,
                 "total": total_time,
+                "date": datetime.now(timezone.utc).strftime(
+                    "%Y-%m-%dT%H:%M:%S"
+                ),
             }
         )
+
+        template_properties = {}
+        int_properties = [
+            "memory",
+            "maxmem",
+            "vcpus",
+            "qrexec_timeout",
+            "shutdown_timeout",
+        ]
+        wanted_properties = [*int_properties, "kernel", "kernelopts"]
+        for prop in wanted_properties:
+            val = getattr(self.vm1, prop, "")
+            if prop in int_properties:
+                val = int(val or 0)
+            template_properties[prop] = val
+        data.update(template_properties)
+
+        template_features = {}
+        int_features = ["os-version"]
+        wanted_features = [
+            "template-buildtime",
+            "last-update",
+            "os",
+            "os-distribution",
+            *int_features,
+        ]
+        for feature in wanted_features:
+            val = self.vm1.features.check_with_template(feature, "")
+            if feature in int_features:
+                val = int(val or 0)
+            template_features[feature] = val
+        data.update(template_features)
+
+        data.update(hcl())
+
         pretty_mean = f"{mean:.{ROUND_PRECISION}f}"
         pretty_total_time = f"{total_time:.{ROUND_PRECISION}f}"
         pretty_items = "iterations=" + str(self.iterations)
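Taken together, each machine-readable result record now carries the timing fields plus template properties, template features, and HCL data. A partial sketch of one record follows; the keys come from the code above, every value is an invented placeholder, and fields already present in data (such as the test name) are omitted.

    # Placeholder values; only the keys reflect what report_result() adds.
    example_record = {
        "iterations": 12,
        "mean": 0.123,
        "total": 1.476,
        "date": "2024-01-01T00:00:00",
        "memory": 400, "maxmem": 4000, "vcpus": 2,
        "qrexec_timeout": 60, "shutdown_timeout": 60,
        "kernel": "6.6.0", "kernelopts": "",
        "template-buildtime": "2024-01-01", "last-update": "",
        "os": "Linux", "os-distribution": "fedora", "os-version": 41,
        "hcl-qubes": "R4.3", "hcl-xen": "4.17.5",
        "hcl-kernel": "6.6.0", "hcl-memory": 16384,
    }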
@@ -659,7 +726,9 @@ def run_test(self, test: TestConfig):
 def main():
     parser = argparse.ArgumentParser(
         epilog="You can set QUBES_TEST_PERF_FILE env variable to a path where "
-        "machine-readable results should be saved."
+        "machine-readable results should be saved. If you want to share a "
+        "detailed result containing hardware information, set "
+        "QUBES_TEST_PERF_HWINFO to a non-empty value."
     )
     parser.add_argument("--dvm", required=True)
     parser.add_argument("--vm1", required=True)
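A hedged usage sketch of the two environment variables; the script name, file path, and qube names below are placeholders, not taken from this diff. They only need to be present in the test's environment, e.g.:

    # Placeholders throughout; adjust the path, script name, and qube names.
    import os
    import subprocess

    env = dict(
        os.environ,
        QUBES_TEST_PERF_FILE="/tmp/perf-results.json",
        QUBES_TEST_PERF_HWINFO="1",
    )
    subprocess.run(
        ["./run-perf-tests", "--dvm", "default-dvm", "--vm1", "test-vm1"],
        env=env,
        check=True,
    )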