Merge pull request #2033 from igchor/skip_bench
[Benchmarks] skip failed benchmarks by default
igchor authored Aug 29, 2024
2 parents fc7a473 + 6238b65 commit b09a969
Showing 1 changed file with 38 additions and 24 deletions.
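
To illustrate the behavior this change introduces, here is a minimal, self-contained sketch (not the real scripts/benchmarks/main.py code): a benchmark that raises during setup or run is reported and skipped, unless exit_on_failure is set, in which case the exception propagates and aborts the run. The Benchmark, FailingBenchmark, and options names below are hypothetical stand-ins mirroring the diff, and the separate setup/run loops of the real script are collapsed into one for brevity.

from types import SimpleNamespace

# Hypothetical stand-ins for the real options object and Benchmark classes.
options = SimpleNamespace(iterations=3, exit_on_failure=False)

class Benchmark:
    def __init__(self, name, values):
        self._name, self._values = name, iter(values)
    def name(self): return self._name
    def unit(self): return "ms"
    def setup(self): pass
    def run(self, env_vars): return SimpleNamespace(value=next(self._values))

class FailingBenchmark(Benchmark):
    def setup(self): raise RuntimeError("missing dependency")

def run_all(benchmarks):
    results = []
    for benchmark in benchmarks:
        try:
            benchmark.setup()
            iteration_results = [benchmark.run({}) for _ in range(options.iterations)]
            iteration_results.sort(key=lambda res: res.value)
            median = iteration_results[len(iteration_results) // 2]
            results.append((benchmark.name(), median.value, benchmark.unit()))
        except Exception as e:
            if options.exit_on_failure:
                raise e  # old behavior: the first failure aborts the whole run
            print(f"{benchmark.name()} failed: {e}")  # new default: report and skip
    return results

print(run_all([FailingBenchmark("broken", []), Benchmark("ok", [3.0, 1.0, 2.0])]))
# prints "broken failed: missing dependency" and then [('ok', 2.0, 'ms')]
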
62 changes: 38 additions & 24 deletions scripts/benchmarks/main.py
@@ -52,34 +52,46 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
         benchmarks = [benchmark for benchmark in benchmarks if filter.search(benchmark.name())]
 
     for benchmark in benchmarks:
-        print(f"setting up {benchmark.name()}... ", end='', flush=True)
-        benchmark.setup()
-        print("complete.")
+        try:
+            print(f"setting up {benchmark.name()}... ", end='', flush=True)
+            benchmark.setup()
+            print("complete.")
+        except Exception as e:
+            if options.exit_on_failure:
+                raise e
+            else:
+                print(f"failed: {e}")
 
     results = []
     for benchmark in benchmarks:
-        merged_env_vars = {**additional_env_vars}
-        iteration_results = []
-        for iter in range(options.iterations):
-            print(f"running {benchmark.name()}, iteration {iter}... ", end='', flush=True)
-            bench_results = benchmark.run(merged_env_vars)
-            if bench_results is not None:
-                print(f"complete ({bench_results.value} {benchmark.unit()}).")
-                iteration_results.append(bench_results)
+        try:
+            merged_env_vars = {**additional_env_vars}
+            iteration_results = []
+            for iter in range(options.iterations):
+                print(f"running {benchmark.name()}, iteration {iter}... ", end='', flush=True)
+                bench_results = benchmark.run(merged_env_vars)
+                if bench_results is not None:
+                    print(f"complete ({bench_results.value} {benchmark.unit()}).")
+                    iteration_results.append(bench_results)
+                else:
+                    print(f"did not finish.")
+
+            if len(iteration_results) == 0:
+                continue
+
+            iteration_results.sort(key=lambda res: res.value)
+            median_index = len(iteration_results) // 2
+            median_result = iteration_results[median_index]
+
+            median_result.unit = benchmark.unit()
+            median_result.name = benchmark.name()
+
+            results.append(median_result)
+        except Exception as e:
+            if options.exit_on_failure:
+                raise e
             else:
-                print(f"did not finish.")
-
-        if len(iteration_results) == 0:
-            continue
-
-        iteration_results.sort(key=lambda res: res.value)
-        median_index = len(iteration_results) // 2
-        median_result = iteration_results[median_index]
-
-        median_result.unit = benchmark.unit()
-        median_result.name = benchmark.name()
-
-        results.append(median_result)
+                print(f"failed: {e}")
 
     for benchmark in benchmarks:
         print(f"tearing down {benchmark.name()}... ", end='', flush=True)
@@ -126,6 +138,7 @@ def validate_and_parse_env_args(env_args):
     parser.add_argument("--timeout", type=int, help='Timeout for individual benchmarks in seconds.', default=600)
     parser.add_argument("--filter", type=str, help='Regex pattern to filter benchmarks by name.', default=None)
    parser.add_argument("--verbose", help='Print output of all the commands.', action="store_true")
+    parser.add_argument("--exit_on_failure", help='Exit on first failure.', action="store_true")
 
     args = parser.parse_args()
     additional_env_vars = validate_and_parse_env_args(args.env)
@@ -137,6 +150,7 @@ def validate_and_parse_env_args(env_args):
     options.timeout = args.timeout
     options.ur_dir = args.ur_dir
     options.ur_adapter_name = args.ur_adapter_name
+    options.exit_on_failure = args.exit_on_failure
 
     benchmark_filter = re.compile(args.filter) if args.filter else None

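Because the new --exit_on_failure option is declared with action="store_true", it defaults to False, so skipping failed benchmarks becomes the default behavior; passing the flag restores the old fail-fast runs that abort on the first setup or run error. A typical invocation might look like python scripts/benchmarks/main.py <benchmark_directory> --exit_on_failure, where the positional benchmark-directory argument is assumed here and not shown in this diff.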
