
Commit

[Release] Use with clause to make sure result json file is closed properly (#46484)

Signed-off-by: Jiajun Yao <jeromeyjj@gmail.com>
jjyao authored Jul 10, 2024
1 parent a0fe305 commit 2c5745f
Showing 10 changed files with 96 additions and 95 deletions.
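
The same fix is applied in every file below: the result JSON is written inside a with block, so the file handle is flushed and closed even if json.dump raises or the process exits right afterwards. A minimal sketch of the before/after pattern (the results payload is a placeholder and TEST_OUTPUT_JSON is assumed to be set, as in the release scripts):

import json
import os

results = {"success": 1}  # placeholder payload, not taken from any one test

# Before: nothing closes the file explicitly. CPython usually closes it
# when the object is garbage-collected, but buffered output can be lost
# if the interpreter exits abruptly, leaving a truncated JSON file.
out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
json.dump(results, out_file)

# After: the with statement guarantees flush and close when the block
# exits, whether normally or via an exception.
with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
    json.dump(results, out_file)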
4 changes: 2 additions & 2 deletions release/autoscaling_tests/run.py
@@ -74,8 +74,8 @@ def run(local):
         "success": success,
     }
     if "TEST_OUTPUT_JSON" in os.environ:
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        json.dump(results, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            json.dump(results, out_file)
 
     print(json.dumps(results, indent=2))
28 changes: 14 additions & 14 deletions release/benchmarks/distributed/many_nodes_tests/actor_test.py
@@ -109,20 +109,20 @@ def main():
     print(f"Result: {json.dumps(result, indent=2)}")
 
     if "TEST_OUTPUT_JSON" in os.environ and not args.no_report:
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        perf = [
-            {
-                "perf_metric_name": name,
-                "perf_metric_value": r["throughput"],
-                "perf_metric_type": "THROUGHPUT",
-            }
-            for (name, r) in result.items()
-        ]
-        result["perf_metrics"] = perf
-        dashboard_test.update_release_test_result(result)
-
-        print(f"Writing data into file: {os.environ['TEST_OUTPUT_JSON']}")
-        json.dump(result, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            perf = [
+                {
+                    "perf_metric_name": name,
+                    "perf_metric_value": r["throughput"],
+                    "perf_metric_type": "THROUGHPUT",
+                }
+                for (name, r) in result.items()
+            ]
+            result["perf_metrics"] = perf
+            dashboard_test.update_release_test_result(result)
+
+            print(f"Writing data into file: {os.environ['TEST_OUTPUT_JSON']}")
+            json.dump(result, out_file)
 
     print("Test finished successfully!")
     ray.shutdown()
@@ -76,15 +76,15 @@ def main():
     )
 
     if "TEST_OUTPUT_JSON" in os.environ and not args.no_report:
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        results = {
-            "actor_launch_time": actor_launch_time,
-            "actor_ready_time": actor_ready_time,
-            "total_time": actor_launch_time + actor_ready_time,
-            "num_actors": args.total_actors,
-            "success": "1",
-        }
-        json.dump(results, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            results = {
+                "actor_launch_time": actor_launch_time,
+                "actor_ready_time": actor_ready_time,
+                "total_time": actor_launch_time + actor_ready_time,
+                "num_actors": args.total_actors,
+                "success": "1",
+            }
+            json.dump(results, out_file)
 
 
 if __name__ == "__main__":
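
For reference, the with form used throughout this commit is roughly equivalent to an explicit try/finally; a short sketch under the same assumption that TEST_OUTPUT_JSON is set (not code from the repository):

import json
import os

out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
try:
    json.dump({"success": "1"}, out_file)
finally:
    # Runs on normal exit and on exceptions, so the buffered JSON is
    # always flushed to disk before the handle is released.
    out_file.close()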
31 changes: 16 additions & 15 deletions release/benchmarks/object_store/test_object_store.py
@@ -58,19 +58,20 @@ def data_len(self, arr):
 print(f"Broadcast time: {duration} ({OBJECT_SIZE} B x {NUM_NODES} nodes)")
 
 if "TEST_OUTPUT_JSON" in os.environ:
-    out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-    results = {
-        "broadcast_time": duration,
-        "object_size": OBJECT_SIZE,
-        "num_nodes": NUM_NODES,
-        "success": "1",
-    }
-    perf_metric_name = f"time_to_broadcast_{OBJECT_SIZE}_bytes_to_{NUM_NODES}_nodes"
-    results["perf_metrics"] = [
-        {
-            "perf_metric_name": perf_metric_name,
-            "perf_metric_value": duration,
-            "perf_metric_type": "LATENCY",
-        }
-    ]
-    json.dump(results, out_file)
+    with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+        results = {
+            "broadcast_time": duration,
+            "object_size": OBJECT_SIZE,
+            "num_nodes": NUM_NODES,
+            "success": "1",
+        }
+        perf_metric_name = f"time_to_broadcast_{OBJECT_SIZE}_bytes_to_{NUM_NODES}_nodes"
+        results["perf_metrics"] = [
+            {
+                "perf_metric_name": perf_metric_name,
+                "perf_metric_value": duration,
+                "perf_metric_type": "LATENCY",
+            }
+        ]
+        print(f"jjyao {results} {out_file}")
+        json.dump(results, out_file)
84 changes: 42 additions & 42 deletions release/benchmarks/single_node/test_single_node.py
@@ -192,45 +192,45 @@ def test_large_object():
 print(f"Ray.get large object time: {large_object_time} " f"({MAX_RAY_GET_SIZE} bytes)")
 
 if "TEST_OUTPUT_JSON" in os.environ:
-    out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-    results = {
-        "args_time": args_time,
-        "num_args": MAX_ARGS,
-        "returns_time": returns_time,
-        "num_returns": MAX_RETURNS,
-        "get_time": get_time,
-        "num_get_args": MAX_RAY_GET_ARGS,
-        "queued_time": queued_time,
-        "num_queued": MAX_QUEUED_TASKS,
-        "large_object_time": large_object_time,
-        "large_object_size": MAX_RAY_GET_SIZE,
-        "success": "1",
-    }
-    results["perf_metrics"] = [
-        {
-            "perf_metric_name": f"{MAX_ARGS}_args_time",
-            "perf_metric_value": args_time,
-            "perf_metric_type": "LATENCY",
-        },
-        {
-            "perf_metric_name": f"{MAX_RETURNS}_returns_time",
-            "perf_metric_value": returns_time,
-            "perf_metric_type": "LATENCY",
-        },
-        {
-            "perf_metric_name": f"{MAX_RAY_GET_ARGS}_get_time",
-            "perf_metric_value": get_time,
-            "perf_metric_type": "LATENCY",
-        },
-        {
-            "perf_metric_name": f"{MAX_QUEUED_TASKS}_queued_time",
-            "perf_metric_value": queued_time,
-            "perf_metric_type": "LATENCY",
-        },
-        {
-            "perf_metric_name": f"{MAX_RAY_GET_SIZE}_large_object_time",
-            "perf_metric_value": large_object_time,
-            "perf_metric_type": "LATENCY",
-        },
-    ]
-    json.dump(results, out_file)
+    with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+        results = {
+            "args_time": args_time,
+            "num_args": MAX_ARGS,
+            "returns_time": returns_time,
+            "num_returns": MAX_RETURNS,
+            "get_time": get_time,
+            "num_get_args": MAX_RAY_GET_ARGS,
+            "queued_time": queued_time,
+            "num_queued": MAX_QUEUED_TASKS,
+            "large_object_time": large_object_time,
+            "large_object_size": MAX_RAY_GET_SIZE,
+            "success": "1",
+        }
+        results["perf_metrics"] = [
+            {
+                "perf_metric_name": f"{MAX_ARGS}_args_time",
+                "perf_metric_value": args_time,
+                "perf_metric_type": "LATENCY",
+            },
+            {
+                "perf_metric_name": f"{MAX_RETURNS}_returns_time",
+                "perf_metric_value": returns_time,
+                "perf_metric_type": "LATENCY",
+            },
+            {
+                "perf_metric_name": f"{MAX_RAY_GET_ARGS}_get_time",
+                "perf_metric_value": get_time,
+                "perf_metric_type": "LATENCY",
+            },
+            {
+                "perf_metric_name": f"{MAX_QUEUED_TASKS}_queued_time",
+                "perf_metric_value": queued_time,
+                "perf_metric_type": "LATENCY",
+            },
+            {
+                "perf_metric_name": f"{MAX_RAY_GET_SIZE}_large_object_time",
+                "perf_metric_value": large_object_time,
+                "perf_metric_type": "LATENCY",
+            },
+        ]
+        json.dump(results, out_file)
@@ -145,9 +145,9 @@ def main():
     assert creation_perf["p50_ms"] * 100 > creation_perf_final["p50_ms"]
 
     if "TEST_OUTPUT_JSON" in os.environ:
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        results = {}
-        json.dump(results, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            results = {}
+            json.dump(results, out_file)
 
 
 main()
6 changes: 3 additions & 3 deletions release/nightly_tests/placement_group_tests/pg_run.py
@@ -61,9 +61,9 @@ def main():
         break
 
     if "TEST_OUTPUT_JSON" in os.environ:
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        results = {}
-        json.dump(results, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            results = {}
+            json.dump(results, out_file)
 
 
 if __name__ == "__main__":
@@ -166,6 +166,6 @@ def parse_script_args():
     run_full_benchmark(args.num_pending_pgs)
 
     if "TEST_OUTPUT_JSON" in os.environ:
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        results = {"success": 1}
-        json.dump(results, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            results = {"success": 1}
+            json.dump(results, out_file)
4 changes: 2 additions & 2 deletions release/nightly_tests/stress_tests/test_state_api_scale.py
@@ -466,8 +466,8 @@ def test(
     ]
 
     if "TEST_OUTPUT_JSON" in os.environ:
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        json.dump(results, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            json.dump(results, out_file)
 
     results.update(state_perf_result)
     print(json.dumps(results, indent=2))
@@ -201,8 +201,8 @@ def not_none(res):
 
     if "TEST_OUTPUT_JSON" in os.environ:
         # This will overwrite all other release tests result
-        out_file = open(os.environ["TEST_OUTPUT_JSON"], "w")
-        json.dump(results, out_file)
+        with open(os.environ["TEST_OUTPUT_JSON"], "w") as out_file:
+            json.dump(results, out_file)
     print(json.dumps(results, indent=2))
 
     assert cleanup_release_test(test_name)
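
One way to sanity-check the effect of the change: run a release script with TEST_OUTPUT_JSON pointing at a temporary file and parse that file after the process exits. The script path and the lack of CLI arguments below are assumptions for illustration, not part of this commit:

import json
import os
import subprocess
import tempfile

out_path = os.path.join(tempfile.gettempdir(), "release_result.json")
env = dict(os.environ, TEST_OUTPUT_JSON=out_path)

# Run one of the release scripts with the output path set.
subprocess.run(["python", "release/autoscaling_tests/run.py"], env=env, check=True)

# Because json.dump now runs inside a with block, the file is closed and
# fully flushed by the time the subprocess exits, so this parse succeeds.
with open(out_path) as f:
    print(json.load(f))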
