@@ -1,9 +1,11 @@
 """Run performance comparisons for versions of coverage"""
 
+import collections
 import contextlib
 import dataclasses
 import itertools
 import os
+import random
 import shutil
 import statistics
 import subprocess
@@ -412,7 +414,7 @@ class Env:
     shell: ShellSession
 
 
-ResultData = Dict[Tuple[str, str, str], float]
+ResultKey = Tuple[str, str, str]
 
 DIMENSION_NAMES = ["proj", "pyver", "cov"]
 
@@ -429,10 +431,9 @@ def __init__(
         self.py_versions = py_versions
         self.cov_versions = cov_versions
         self.projects = projects
-        self.result_data: ResultData = {}
+        self.result_data: Dict[ResultKey, List[float]] = {}
 
     def run(self, num_runs: int = 3) -> None:
-        results = []
         total_runs = (
             len(self.projects)
             * len(self.py_versions)
@@ -441,8 +442,10 @@ def run(self, num_runs: int = 3) -> None:
         )
         total_run_nums = iter(itertools.count(start=1))
 
+        all_runs = []
+
         for proj in self.projects:
-            print(f"Testing with {proj.slug}")
+            print(f"Prepping project {proj.slug}")
             with proj.shell() as shell:
                 proj.make_dir()
                 proj.get_source(shell)
@@ -459,37 +462,46 @@ def run(self, num_runs: int = 3) -> None:
                     print(f"Prepping for {proj.slug} {pyver.slug}")
                     proj.prep_environment(env)
                     for cov_ver in self.cov_versions:
-                        durations = []
-                        for run_num in range(num_runs):
-                            total_run_num = next(total_run_nums)
-                            print(
-                                f"Running tests, cov={cov_ver.slug}, "
-                                + f"{run_num+1} of {num_runs}, "
-                                + f"total {total_run_num}/{total_runs}"
-                            )
-                            if cov_ver.pip_args is None:
-                                dur = proj.run_no_coverage(env)
-                            else:
-                                dur = proj.run_with_coverage(
-                                    env,
-                                    cov_ver.pip_args,
-                                    cov_ver.tweaks,
-                                )
-                            print(f"Tests took {dur:.3f}s")
-                            durations.append(dur)
-                        med = statistics.median(durations)
-                        result = (
-                            f"Median for {proj.slug}, {pyver.slug}, "
-                            + f"cov={cov_ver.slug}: {med:.3f}s"
-                        )
-                        print(f"## {result}")
-                        results.append(result)
-                        result_key = (proj.slug, pyver.slug, cov_ver.slug)
-                        self.result_data[result_key] = med
+                        all_runs.append((proj, pyver, cov_ver, env))
+
+        all_runs *= num_runs
+        random.shuffle(all_runs)
+
+        run_data: Dict[ResultKey, List[float]] = collections.defaultdict(list)
 
+        for proj, pyver, cov_ver, env in all_runs:
+            total_run_num = next(total_run_nums)
+            print(
+                "Running tests: "
+                + f"{proj.slug}, {pyver.slug}, cov={cov_ver.slug}, "
+                + f"{total_run_num} of {total_runs}"
+            )
+            with env.shell:
+                with change_dir(proj.dir):
+                    if cov_ver.pip_args is None:
+                        dur = proj.run_no_coverage(env)
+                    else:
+                        dur = proj.run_with_coverage(
+                            env,
+                            cov_ver.pip_args,
+                            cov_ver.tweaks,
+                        )
+            print(f"Tests took {dur:.3f}s")
+            result_key = (proj.slug, pyver.slug, cov_ver.slug)
+            run_data[result_key].append(dur)
+
+        # Summarize and collect the data.
         print("# Results")
-        for result in results:
-            print(result)
+        for proj in self.projects:
+            for pyver in self.py_versions:
+                for cov_ver in self.cov_versions:
+                    result_key = (proj.slug, pyver.slug, cov_ver.slug)
+                    med = statistics.median(run_data[result_key])
+                    self.result_data[result_key] = med
+                    print(
+                        f"Median for {proj.slug}, {pyver.slug}, "
+                        + f"cov={cov_ver.slug}: {med:.3f}s"
+                    )
 
     def show_results(
         self,
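
Below is a minimal standalone sketch (not part of the diff above) of the scheduling pattern this change introduces: build every benchmark combination once, repeat the list num_runs times, shuffle it so repeated runs of the same combination are interleaved instead of back to back, then summarize each combination with the median of its durations. The names run_shuffled, fake_timed_run, and the sample keys are hypothetical stand-ins for illustration, not objects from the benchmark script.

import collections
import random
import statistics
from typing import Dict, List, Tuple

ResultKey = Tuple[str, str, str]

def run_shuffled(combos: List[ResultKey], num_runs: int) -> Dict[ResultKey, float]:
    # Each combination appears num_runs times in the schedule.
    all_runs = combos * num_runs
    # Shuffling spreads each combination's runs across the session, so
    # slow drift in machine load doesn't bias any one combination.
    random.shuffle(all_runs)
    run_data: Dict[ResultKey, List[float]] = collections.defaultdict(list)
    for key in all_runs:
        run_data[key].append(fake_timed_run(key))
    # Median is less sensitive than mean to a single anomalously slow run.
    return {key: statistics.median(durs) for key, durs in run_data.items()}

def fake_timed_run(key: ResultKey) -> float:
    # Hypothetical placeholder for actually running and timing a test suite.
    return random.uniform(1.0, 2.0)

print(run_shuffled([("proj", "py3.10", "cov"), ("proj", "py3.10", "nocov")], 3))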