Commit 4d6ac8b

test(perf): randomize the order of benchmark runs

1 parent 405fae8

lab/benchmark.py

Lines changed: 45 additions & 33 deletions
@@ -1,9 +1,11 @@
 """Run performance comparisons for versions of coverage"""
 
+import collections
 import contextlib
 import dataclasses
 import itertools
 import os
+import random
 import shutil
 import statistics
 import subprocess
@@ -412,7 +414,7 @@ class Env:
     shell: ShellSession
 
 
-ResultData = Dict[Tuple[str, str, str], float]
+ResultKey = Tuple[str, str, str]
 
 DIMENSION_NAMES = ["proj", "pyver", "cov"]
 
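
The alias swap here supports the rest of the change: once runs are shuffled, results for a single (proj, pyver, cov) combination arrive scattered across the whole session, so the three-part key is named once and reused for several mappings. A small illustration of how the alias can be used (the slug values are invented):

```python
from typing import Dict, List, Tuple

ResultKey = Tuple[str, str, str]  # (proj, pyver, cov) slugs

# One alias can type both the raw per-run durations...
run_data: Dict[ResultKey, List[float]] = {("pytest", "py310", "cov62"): [2.1, 1.9]}
# ...and a single summarized value per configuration.
medians: Dict[ResultKey, float] = {("pytest", "py310", "cov62"): 2.0}
```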

@@ -429,10 +431,9 @@ def __init__(
         self.py_versions = py_versions
         self.cov_versions = cov_versions
         self.projects = projects
-        self.result_data: ResultData = {}
+        self.result_data: Dict[ResultKey, List[float]] = {}
 
     def run(self, num_runs: int = 3) -> None:
-        results = []
         total_runs = (
             len(self.projects)
             * len(self.py_versions)
@@ -441,8 +442,10 @@ def run(self, num_runs: int = 3) -> None:
         )
         total_run_nums = iter(itertools.count(start=1))
 
+        all_runs = []
+
         for proj in self.projects:
-            print(f"Testing with {proj.slug}")
+            print(f"Prepping project {proj.slug}")
             with proj.shell() as shell:
                 proj.make_dir()
                 proj.get_source(shell)
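
The `all_runs` list introduced above is the core of the commit: instead of timing each configuration's runs back to back, the prep pass only collects work items. The next hunk repeats and shuffles the list so that no configuration consistently runs first or last, which keeps warm caches and machine-load drift from favoring one coverage version. A minimal standalone sketch of that repeat-and-shuffle pattern, with made-up names standing in for the benchmark's (proj, pyver, cov_ver, env) tuples:

```python
import random

configs = ["cov-6.2", "cov-6.3", "no-cov"]  # hypothetical stand-ins
num_runs = 3

all_runs = configs * num_runs  # each configuration appears num_runs times...
random.shuffle(all_runs)       # ...and the copies are interleaved at random

for run_num, cfg in enumerate(all_runs, start=1):
    # the real loop times a test suite here; this just shows the order
    print(f"run {run_num} of {len(all_runs)}: {cfg}")
```
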
@@ -459,37 +462,46 @@ def run(self, num_runs: int = 3) -> None:
                         print(f"Prepping for {proj.slug} {pyver.slug}")
                         proj.prep_environment(env)
                         for cov_ver in self.cov_versions:
-                            durations = []
-                            for run_num in range(num_runs):
-                                total_run_num = next(total_run_nums)
-                                print(
-                                    f"Running tests, cov={cov_ver.slug}, "
-                                    + f"{run_num+1} of {num_runs}, "
-                                    + f"total {total_run_num}/{total_runs}"
-                                )
-                                if cov_ver.pip_args is None:
-                                    dur = proj.run_no_coverage(env)
-                                else:
-                                    dur = proj.run_with_coverage(
-                                        env,
-                                        cov_ver.pip_args,
-                                        cov_ver.tweaks,
-                                    )
-                                print(f"Tests took {dur:.3f}s")
-                                durations.append(dur)
-                            med = statistics.median(durations)
-                            result = (
-                                f"Median for {proj.slug}, {pyver.slug}, "
-                                + f"cov={cov_ver.slug}: {med:.3f}s"
-                            )
-                            print(f"## {result}")
-                            results.append(result)
-                            result_key = (proj.slug, pyver.slug, cov_ver.slug)
-                            self.result_data[result_key] = med
+                            all_runs.append((proj, pyver, cov_ver, env))
+
+        all_runs *= num_runs
+        random.shuffle(all_runs)
+
+        run_data: Dict[ResultKey, List[float]] = collections.defaultdict(list)
 
+        for proj, pyver, cov_ver, env in all_runs:
+            total_run_num = next(total_run_nums)
+            print(
+                "Running tests: "
+                + f"{proj.slug}, {pyver.slug}, cov={cov_ver.slug}, "
+                + f"{total_run_num} of {total_runs}"
+            )
+            with env.shell:
+                with change_dir(proj.dir):
+                    if cov_ver.pip_args is None:
+                        dur = proj.run_no_coverage(env)
+                    else:
+                        dur = proj.run_with_coverage(
+                            env,
+                            cov_ver.pip_args,
+                            cov_ver.tweaks,
+                        )
+            print(f"Tests took {dur:.3f}s")
+            result_key = (proj.slug, pyver.slug, cov_ver.slug)
+            run_data[result_key].append(dur)
+
+        # Summarize and collect the data.
         print("# Results")
-        for result in results:
-            print(result)
+        for proj in self.projects:
+            for pyver in self.py_versions:
+                for cov_ver in self.cov_versions:
+                    result_key = (proj.slug, pyver.slug, cov_ver.slug)
+                    med = statistics.median(run_data[result_key])
+                    self.result_data[result_key] = med
+                    print(
+                        f"Median for {proj.slug}, {pyver.slug}, "
+                        + f"cov={cov_ver.slug}: {med:.3f}s"
+                    )
 
     def show_results(
         self,
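
Because a configuration's runs are now scattered through the shuffled sequence, durations are accumulated per key as they arrive and only summarized after everything finishes; the median is used since it is less sensitive to a single outlier run than the mean. A self-contained sketch of that collect-then-summarize pattern (timings fabricated for illustration):

```python
import collections
import statistics

run_data = collections.defaultdict(list)

# Durations arrive in arbitrary (shuffled) order, keyed by configuration.
for key, dur in [("a", 2.1), ("b", 3.3), ("a", 1.9), ("b", 3.1), ("a", 2.0)]:
    run_data[key].append(dur)

# Summarize per key only after all runs are done.
for key, durations in sorted(run_data.items()):
    print(f"Median for {key}: {statistics.median(durations):.3f}s")
```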
