This repository has been archived by the owner on Oct 11, 2024. It is now read-only.

Handle server startup failure in __enter__
dbarbuzzi committed May 31, 2024
1 parent a160eb9 commit 7a575f0
Showing 1 changed file with 9 additions and 4 deletions: tests/utils/server.py
tests/utils/server.py

@@ -97,10 +97,15 @@ def __enter__(self):
         ray.init(ignore_reinit_error=True)
         log_banner(self._logger, "server startup command args",
                    shlex.join(self._args))
-        self.server_runner = ServerRunner.remote(self._args,
-                                                 logger=self._logger)
-        ray.get(self.server_runner.ready.remote())
-        return self.server_runner
+
+        try:
+            self.server_runner = ServerRunner.remote(self._args,
+                                                     logger=self._logger)
+            ray.get(self.server_runner.ready.remote())
+            return self.server_runner
+        except Exception as e:
+            self.__exit__(*sys.exc_info())
+            raise e
 
     def __exit__(self, exc_type, exc_value, exc_traceback):
         """
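Context for the change: when __enter__ raises, Python does not call __exit__ for that with block, so any state already allocated (here, the Ray actor) would leak. The commit therefore calls __exit__ manually before re-raising. Below is a minimal, self-contained sketch of the same pattern; ManagedServer and _start_server are hypothetical names for illustration, not code from this repository.

    import sys

    class ManagedServer:
        """Hypothetical example of cleanup-on-failure in __enter__."""

        def __enter__(self):
            self._proc = None
            try:
                self._proc = self._start_server()  # may raise on startup failure
                return self._proc
            except Exception:
                # Python will NOT invoke __exit__ automatically when
                # __enter__ raises, so clean up manually, then re-raise.
                self.__exit__(*sys.exc_info())
                raise

        def __exit__(self, exc_type, exc_value, exc_traceback):
            if self._proc is not None:
                self._proc = None  # stop/release the real resource here

        def _start_server(self):
            # Stand-in for launching the server; always fails in this demo.
            raise RuntimeError("simulated startup failure")

    # Usage: the RuntimeError still propagates, but __exit__ has already run.
    try:
        with ManagedServer():
            pass
    except RuntimeError:
        pass

A bare raise preserves the original exception and traceback; the diff's raise e inside the except block behaves equivalently here.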

4 comments on commit 7a575f0

@github-actions

bigger_is_better

Benchmark suite      Current: 7a575f0               Previous: a160eb9   Ratio
request_throughput   3.800600191953658 prompts/s
token_throughput     1459.4304737102048 tokens/s

Common configuration for both rows: VLLM Engine throughput - synthetic; model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096; benchmark_throughput args: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000; gpu_description: NVIDIA A10G x 1; vllm_version: 0.4.0; python_version: 3.8.17 (default, Jun 7 2023, 12:29:39) [GCC 9.4.0]; torch_version: 2.3.0+cu121

This comment was automatically generated by workflow using github-action-benchmark.

@github-actions

bigger_is_better

Benchmark suite      Current: 7a575f0               Previous: a160eb9   Ratio
request_throughput   3.853903928862278 prompts/s
token_throughput     1479.8991086831147 tokens/s

Common configuration for both rows: VLLM Engine throughput - synthetic; model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096; benchmark_throughput args: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000; gpu_description: NVIDIA A10G x 1; vllm_version: 0.4.0; python_version: 3.11.4 (main, Jun 7 2023, 10:57:56) [GCC 9.4.0]; torch_version: 2.3.0+cu121

This comment was automatically generated by workflow using github-action-benchmark.

@github-actions

bigger_is_better

Benchmark suite      Current: 7a575f0              Previous: a160eb9             Ratio
request_throughput   3.789483841774373 prompts/s   3.79666143344845 prompts/s    1.00
token_throughput     1455.1617952413592 tokens/s   1457.9179904442049 tokens/s   1.00

Common configuration for both rows: VLLM Engine throughput - synthetic; model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096; benchmark_throughput args: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000; gpu_description: NVIDIA A10G x 1; vllm_version: 0.4.0; python_version: 3.9.17 (main, Jun 7 2023, 12:29:40) [GCC 9.4.0]; torch_version: 2.3.0+cu121

This comment was automatically generated by workflow using github-action-benchmark.
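As a quick arithmetic check of the Ratio column above (a sketch; which value github-action-benchmark uses as the numerator is an assumption on my part, though both orientations round to 1.00 for these numbers):

    # Ratio check for the run above; numerator/denominator orientation
    # is an assumption, but both directions round to 1.00 here.
    prev_req, cur_req = 3.79666143344845, 3.789483841774373
    prev_tok, cur_tok = 1457.9179904442049, 1455.1617952413592
    print(f"{prev_req / cur_req:.2f}", f"{cur_req / prev_req:.2f}")  # 1.00 1.00
    print(f"{prev_tok / cur_tok:.2f}", f"{cur_tok / prev_tok:.2f}")  # 1.00 1.00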

@github-actions

bigger_is_better

Benchmark suite      Current: 7a575f0               Previous: a160eb9   Ratio
request_throughput   3.8087115730087704 prompts/s
token_throughput     1462.545244035368 tokens/s

Common configuration for both rows: VLLM Engine throughput - synthetic; model: NousResearch/Llama-2-7b-chat-hf; max_model_len: 4096; benchmark_throughput args: use-all-available-gpus, input-len 256, output-len 128, num-prompts 1000; gpu_description: NVIDIA A10G x 1; vllm_version: 0.4.0; python_version: 3.10.12 (main, Jun 7 2023, 13:38:31) [GCC 9.4.0]; torch_version: 2.3.0+cu121

This comment was automatically generated by workflow using github-action-benchmark.
