Pass --timeout flag to pyperf (#354)
* Implement timeout mechanism for a benchmark run

* Address review feedback

* Pin pyperf to 2.8.0

Update test_commands to reflect a change in how pyperf reports data.
diegorusso authored Oct 2, 2024
1 parent 0e9646c commit 22b2819
Showing 7 changed files with 40 additions and 14 deletions.
doc/usage.rst: 6 changes (4 additions, 2 deletions)

@@ -102,8 +102,8 @@ Usage::
pyperformance run [-h] [-r] [-f] [--debug-single-value] [-v] [-m]
[--affinity CPU_LIST] [-o FILENAME]
[--append FILENAME] [--manifest MANIFEST]
[-b BM_LIST] [--inherit-environ VAR_LIST]
[-p PYTHON]
[--timeout TIMEOUT] [-b BM_LIST]
[--inherit-environ VAR_LIST] [-p PYTHON]

options::

@@ -124,6 +124,8 @@ options::
baseline_python, not changed_python.
--append FILENAME Add runs to an existing file, or create it if
it doesn't exist
--timeout TIMEOUT Specify a timeout in seconds for a single
benchmark run (default: disabled)
--manifest MANIFEST benchmark manifest file to use
-b BM_LIST, --benchmarks BM_LIST
Comma-separated list of benchmarks to run. Can
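The new flag slots into the existing command line. As a hedged illustration (the benchmark name and output path below are placeholders, not taken from this commit), a run capped at five minutes per benchmark could look like:

    pyperformance run --timeout 300 -b nbody -o results.json

With the flag omitted, behaviour is unchanged: the timeout stays disabled.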
pyperformance/_benchmark.py: 6 changes (5 additions, 1 deletion)

@@ -233,7 +233,11 @@ def _run_perf_script(python, runscript, runid, *,
sys.stderr.flush()
sys.stderr.write(stderr)
sys.stderr.flush()
raise RuntimeError("Benchmark died")
# pyperf returns exit code 124 if the benchmark execution times out
if ec == 124:
raise TimeoutError("Benchmark timed out")
else:
raise RuntimeError("Benchmark died")
return pyperf.BenchmarkSuite.load(tmp)


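For context, a minimal sketch of the exit-code mapping the hunk above adds: a child process return code of 124 (the code pyperf uses when --timeout expires) is surfaced as TimeoutError, while any other non-zero code still raises RuntimeError. The helper name and argument handling here are illustrative only, not the real _run_perf_script signature:

    import subprocess
    import sys

    def run_script(argv):
        # Run the benchmark script and capture its output.
        proc = subprocess.run(argv, capture_output=True, text=True)
        if proc.returncode != 0:
            sys.stderr.write(proc.stderr)
            sys.stderr.flush()
            # pyperf exits with code 124 when the benchmark times out.
            if proc.returncode == 124:
                raise TimeoutError("Benchmark timed out")
            raise RuntimeError("Benchmark died")
        return proc.stdout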
pyperformance/cli.py: 11 changes (11 additions, 0 deletions)

@@ -25,6 +25,13 @@ def comma_separated(values):
return list(filter(None, values))


def check_positive(value):
value = int(value)
if value <= 0:
raise argparse.ArgumentTypeError("Argument must be a positive integer.")
return value


def filter_opts(cmd, *, allow_no_benchmarks=False):
cmd.add_argument("--manifest", help="benchmark manifest file to use")

@@ -82,6 +89,10 @@ def parse_args():
help="Use the same number of loops as a previous run "
"(i.e., don't recalibrate). Should be a path to a "
".json file from a previous run.")
cmd.add_argument("--timeout",
help="Specify a timeout in seconds for a single "
"benchmark run (default: disabled)",
type=check_positive)
filter_opts(cmd)

# show
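To see how the check_positive validator behaves under argparse, here is a self-contained sketch; the parser built here is a stand-in for the real pyperformance CLI. A non-positive value makes argparse report the ArgumentTypeError message and exit:

    import argparse

    def check_positive(value):
        value = int(value)
        if value <= 0:
            raise argparse.ArgumentTypeError("Argument must be a positive integer.")
        return value

    parser = argparse.ArgumentParser()
    parser.add_argument("--timeout", type=check_positive)

    print(parser.parse_args(["--timeout", "300"]).timeout)  # prints 300
    # parser.parse_args(["--timeout", "0"]) would exit with:
    #   error: argument --timeout: Argument must be a positive integer.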
pyperformance/commands.py: 4 changes (2 additions, 2 deletions)

@@ -191,8 +191,8 @@ def cmd_run(options, benchmarks):

if errors:
print("%s benchmarks failed:" % len(errors))
for name in errors:
print("- %s" % name)
for name, reason in errors:
print("- %s (%s)" % (name, reason))
print()
sys.exit(1)

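The errors list now carries (name, reason) pairs, so the failure summary names each benchmark together with why it failed. A small illustration with made-up entries:

    errors = [
        ("nbody", TimeoutError("Benchmark timed out")),
        ("json_loads", "Install requirements error"),
    ]
    print("%s benchmarks failed:" % len(errors))
    for name, reason in errors:
        print("- %s (%s)" % (name, reason))
    # 2 benchmarks failed:
    # - nbody (Benchmark timed out)
    # - json_loads (Install requirements error)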
pyperformance/requirements/requirements.txt: 2 changes (1 addition, 1 deletion)

@@ -10,5 +10,5 @@ psutil==5.9.5
# via
# -r requirements.in
# pyperf
pyperf==2.7.0
pyperf==2.8.0
# via -r requirements.in
pyperformance/run.py: 13 changes (11 additions, 2 deletions)

@@ -164,7 +164,7 @@ def add_bench(dest_suite, obj):
bench_venv, bench_runid = benchmarks.get(bench)
if bench_venv is None:
print("ERROR: Benchmark %s failed: could not install requirements" % name)
errors.append(name)
errors.append((name, "Install requirements error"))
continue
try:
result = bench.run(
@@ -174,10 +174,17 @@
venv=bench_venv,
verbose=options.verbose,
)
except TimeoutError as exc:
print("ERROR: Benchmark %s timed out" % name)
errors.append((name, exc))
except RuntimeError as exc:
print("ERROR: Benchmark %s failed: %s" % (name, exc))
traceback.print_exc()
errors.append((name, exc))
except Exception as exc:
print("ERROR: Benchmark %s failed: %s" % (name, exc))
traceback.print_exc()
errors.append(name)
errors.append((name, exc))
else:
suite = add_bench(suite, result)

@@ -233,5 +240,7 @@ def get_pyperf_opts(options):
opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
if options.min_time:
opts.append('--min-time=%s' % options.min_time)
if options.timeout:
opts.append('--timeout=%s' % options.timeout)

return opts
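get_pyperf_opts only forwards the flag when a timeout was actually given, so the default stays "disabled". A reduced sketch of that translation (the options object below is a stand-in for the parsed argparse namespace, trimmed to the timeout attribute):

    from types import SimpleNamespace

    def timeout_opts(options):
        opts = []
        if options.timeout:
            opts.append('--timeout=%s' % options.timeout)
        return opts

    print(timeout_opts(SimpleNamespace(timeout=300)))   # ['--timeout=300']
    print(timeout_opts(SimpleNamespace(timeout=None)))  # []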
pyperformance/tests/test_commands.py: 12 changes (6 additions, 6 deletions)

@@ -399,7 +399,7 @@ def test_compare_single_value(self):
Performance version: 0.2
### call_simple ###
7896.0 kB -> 7900.0 kB: 1.00x larger
7896.0 KiB -> 7900.0 KiB: 1.00x larger
''').lstrip())

def test_compare_csv(self):
@@ -458,11 +458,11 @@ def test_compare_table_single_value(self):
Performance version: 0.2
+-------------+-----------+-----------+--------------+------------------------------------------+
| Benchmark | mem1.json | mem2.json | Change | Significance |
+=============+===========+===========+==============+==========================================+
| call_simple | 7896.0 kB | 7900.0 kB | 1.00x larger | (benchmark only contains a single value) |
+-------------+-----------+-----------+--------------+------------------------------------------+
+-------------+------------+------------+--------------+------------------------------------------+
| Benchmark | mem1.json | mem2.json | Change | Significance |
+=============+============+============+==============+==========================================+
| call_simple | 7896.0 KiB | 7900.0 KiB | 1.00x larger | (benchmark only contains a single value) |
+-------------+------------+------------+--------------+------------------------------------------+
''').lstrip())


