From 22b281915c305e16518790fa25c030eb2078d25e Mon Sep 17 00:00:00 2001
From: Diego Russo
Date: Wed, 2 Oct 2024 15:05:22 +0100
Subject: [PATCH] Pass --timeout flag to pyperf (#354)

* Implement timeout mechanism for a benchmark run
* Address feedback
* Pin pyperf to 2.8.0

Update test_commands to reflect a change in how pyperf reports data.
---
 doc/usage.rst                               |  6 ++++--
 pyperformance/_benchmark.py                 |  6 +++++-
 pyperformance/cli.py                        | 11 +++++++++++
 pyperformance/commands.py                   |  4 ++--
 pyperformance/requirements/requirements.txt |  2 +-
 pyperformance/run.py                        | 13 +++++++++++--
 pyperformance/tests/test_commands.py        | 12 ++++++------
 7 files changed, 40 insertions(+), 14 deletions(-)

diff --git a/doc/usage.rst b/doc/usage.rst
index 6ebd5153..ae753af7 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -102,8 +102,8 @@ Usage::
     pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
                       [--affinity CPU_LIST] [-o FILENAME]
                       [--append FILENAME] [--manifest MANIFEST]
-                      [-b BM_LIST] [--inherit-environ VAR_LIST]
-                      [-p PYTHON]
+                      [--timeout TIMEOUT] [-b BM_LIST]
+                      [--inherit-environ VAR_LIST] [-p PYTHON]
 
 options::
 
@@ -124,6 +124,8 @@ options::
                         baseline_python, not changed_python.
   --append FILENAME     Add runs to an existing file, or create it
                         if it doesn't exist
+  --timeout TIMEOUT     Specify a timeout in seconds for a single
+                        benchmark run (default: disabled)
   --manifest MANIFEST   benchmark manifest file to use
   -b BM_LIST, --benchmarks BM_LIST
                         Comma-separated list of benchmarks to run. Can
diff --git a/pyperformance/_benchmark.py b/pyperformance/_benchmark.py
index 8ca5eaac..5ec6fe07 100644
--- a/pyperformance/_benchmark.py
+++ b/pyperformance/_benchmark.py
@@ -233,7 +233,11 @@ def _run_perf_script(python, runscript, runid, *,
         sys.stderr.flush()
         sys.stderr.write(stderr)
         sys.stderr.flush()
-        raise RuntimeError("Benchmark died")
+        # pyperf returns exit code 124 if the benchmark execution times out
+        if ec == 124:
+            raise TimeoutError("Benchmark timed out")
+        else:
+            raise RuntimeError("Benchmark died")
 
     return pyperf.BenchmarkSuite.load(tmp)
diff --git a/pyperformance/cli.py b/pyperformance/cli.py
index 843ed6a6..3348f62e 100644
--- a/pyperformance/cli.py
+++ b/pyperformance/cli.py
@@ -25,6 +25,13 @@ def comma_separated(values):
     return list(filter(None, values))
 
 
+def check_positive(value):
+    value = int(value)
+    if value <= 0:
+        raise argparse.ArgumentTypeError("Argument must be a positive integer.")
+    return value
+
+
 def filter_opts(cmd, *, allow_no_benchmarks=False):
     cmd.add_argument("--manifest", help="benchmark manifest file to use")
 
@@ -82,6 +89,10 @@ def parse_args():
                      help="Use the same number of loops as a previous run "
                           "(i.e., don't recalibrate). Should be a path to a "
                          ".json file from a previous run.")
+    cmd.add_argument("--timeout",
+                     help="Specify a timeout in seconds for a single "
+                          "benchmark run (default: disabled)",
+                     type=check_positive)
     filter_opts(cmd)
 
     # show
diff --git a/pyperformance/commands.py b/pyperformance/commands.py
index ade1cb12..7cfa4033 100644
--- a/pyperformance/commands.py
+++ b/pyperformance/commands.py
@@ -191,8 +191,8 @@ def cmd_run(options, benchmarks):
 
     if errors:
         print("%s benchmarks failed:" % len(errors))
-        for name in errors:
-            print("- %s" % name)
+        for name, reason in errors:
+            print("- %s (%s)" % (name, reason))
         print()
         sys.exit(1)
diff --git a/pyperformance/requirements/requirements.txt b/pyperformance/requirements/requirements.txt
index 80be7f29..4419cca8 100644
--- a/pyperformance/requirements/requirements.txt
+++ b/pyperformance/requirements/requirements.txt
@@ -10,5 +10,5 @@ psutil==5.9.5
     # via
     #   -r requirements.in
     #   pyperf
-pyperf==2.7.0
+pyperf==2.8.0
     # via -r requirements.in
diff --git a/pyperformance/run.py b/pyperformance/run.py
index c7865b84..67ab5d89 100644
--- a/pyperformance/run.py
+++ b/pyperformance/run.py
@@ -164,7 +164,7 @@ def add_bench(dest_suite, obj):
         bench_venv, bench_runid = benchmarks.get(bench)
         if bench_venv is None:
             print("ERROR: Benchmark %s failed: could not install requirements" % name)
-            errors.append(name)
+            errors.append((name, "Install requirements error"))
             continue
         try:
             result = bench.run(
@@ -174,10 +174,17 @@ def add_bench(dest_suite, obj):
                 venv=bench_venv,
                 verbose=options.verbose,
             )
+        except TimeoutError as exc:
+            print("ERROR: Benchmark %s timed out" % name)
+            errors.append((name, exc))
+        except RuntimeError as exc:
+            print("ERROR: Benchmark %s failed: %s" % (name, exc))
+            traceback.print_exc()
+            errors.append((name, exc))
         except Exception as exc:
             print("ERROR: Benchmark %s failed: %s" % (name, exc))
             traceback.print_exc()
-            errors.append(name)
+            errors.append((name, exc))
         else:
             suite = add_bench(suite, result)
 
@@ -233,5 +240,7 @@ def get_pyperf_opts(options):
         opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
     if options.min_time:
         opts.append('--min-time=%s' % options.min_time)
+    if options.timeout:
+        opts.append('--timeout=%s' % options.timeout)
 
     return opts
diff --git a/pyperformance/tests/test_commands.py b/pyperformance/tests/test_commands.py
index 7c311cf8..870a58bc 100644
--- a/pyperformance/tests/test_commands.py
+++ b/pyperformance/tests/test_commands.py
@@ -399,7 +399,7 @@ def test_compare_single_value(self):
             Performance version: 0.2
 
             ### call_simple ###
-            7896.0 kB -> 7900.0 kB: 1.00x larger
+            7896.0 KiB -> 7900.0 KiB: 1.00x larger
             ''').lstrip())
 
     def test_compare_csv(self):
@@ -458,11 +458,11 @@ def test_compare_table_single_value(self):
             Performance version: 0.2
 
-            +-------------+-----------+-----------+--------------+------------------------------------------+
-            | Benchmark   | mem1.json | mem2.json | Change       | Significance                             |
-            +=============+===========+===========+==============+==========================================+
-            | call_simple | 7896.0 kB | 7900.0 kB | 1.00x larger | (benchmark only contains a single value) |
-            +-------------+-----------+-----------+--------------+------------------------------------------+
+            +-------------+------------+------------+--------------+------------------------------------------+
+            | Benchmark   | mem1.json  | mem2.json  | Change       | Significance                             |
+            +=============+============+============+==============+==========================================+
+            | call_simple | 7896.0 KiB | 7900.0 KiB | 1.00x larger | (benchmark only contains a single value) |
+            +-------------+------------+------------+--------------+------------------------------------------+
             ''').lstrip())
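
A minimal usage sketch of the new option: the value is validated by check_positive, forwarded
to pyperf as --timeout, and a benchmark that exceeds the limit is raised as TimeoutError and
listed in the failure summary. The benchmark name, output file, and 300-second value below are
illustrative only, not taken from the patch; the summary lines are approximate:

    $ pyperformance run --timeout 300 -b nbody -o results.json
    ...
    1 benchmarks failed:
    - nbody (Benchmark timed out)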