
Add docstrings for LLM #137

Merged (13 commits) on Jun 4, 2023
4 changes: 2 additions & 2 deletions benchmarks/benchmark_latency.py
@@ -30,15 +30,15 @@ def main(args: argparse.Namespace):
         max_tokens=args.output_len,
     )
     print(sampling_params)
-    dummy_prompts = [""] * args.batch_size
     dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size
 
     def run_to_completion(profile: bool = False):
         if profile:
             torch.cuda.cudart().cudaProfilerStart()
         start_time = time.time()
 
-        llm.generate(dummy_prompts, sampling_params, dummy_prompt_token_ids,
+        llm.generate(prompt_token_ids=dummy_prompt_token_ids,
+                     sampling_params=sampling_params,
                      use_tqdm=False)
 
         end_time = time.time()
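
For context, the benchmark now drives `generate` purely with token IDs passed by keyword. A minimal sketch of the new calling convention, assuming a top-level `cacheflow` import path and an arbitrary small model (both are assumptions, not taken from this diff):

```python
from cacheflow import LLM, SamplingParams  # assumed public import path

# Hypothetical model choice and sizes, used only for illustration.
llm = LLM(model="facebook/opt-125m")
sampling_params = SamplingParams(max_tokens=128)

# Prompt strings are no longer needed when token IDs are passed directly.
dummy_prompt_token_ids = [[0] * 32] * 8  # batch of 8, input length 32
outputs = llm.generate(prompt_token_ids=dummy_prompt_token_ids,
                       sampling_params=sampling_params,
                       use_tqdm=False)
```
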
8 changes: 5 additions & 3 deletions benchmarks/benchmark_throughput.py
@@ -72,9 +72,9 @@ def main(args: argparse.Namespace):
         )
         # FIXME(woosuk): Do not use internal method.
         llm._add_request(
-            prompt="",
-            sampling_params=sampling_params,
+            prompt=None,
             prompt_token_ids=prompt_token_ids,
+            sampling_params=sampling_params,
         )
 
     start = time.time()
@@ -85,7 +85,9 @@ def main(args: argparse.Namespace):
         len(prompt_token_ids) + output_len
         for prompt_token_ids, output_len in requests
     )
-    print(f"Throughput: {total_num_tokens / (end - start):.2f} tokens/s")
+    elapsed_time = end - start
+    print(f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, "
+          f"{total_num_tokens / elapsed_time:.2f} tokens/s")
 
 
 if __name__ == "__main__":
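
The new report divides both the request count and the total token count (prompt plus generated tokens) by the same elapsed time. A small worked example with made-up numbers:

```python
# Each request is (prompt_token_ids, output_len); the values are made up.
requests = [([0] * 32, 96), ([0] * 64, 64)]
elapsed_time = 0.5  # seconds, hypothetical

# Total tokens = prompt tokens + generated tokens, summed over requests: 256.
total_num_tokens = sum(len(prompt_token_ids) + output_len
                       for prompt_token_ids, output_len in requests)
print(f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, "
      f"{total_num_tokens / elapsed_time:.2f} tokens/s")
# -> Throughput: 4.00 requests/s, 512.00 tokens/s
```
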
61 changes: 57 additions & 4 deletions cacheflow/entrypoints/llm.py
@@ -11,6 +11,28 @@
 
 
 class LLM:
+    """An LLM for generating texts from given prompts and sampling parameters.
+
+    This class includes a tokenizer, a language model (possibly distributed
+    across multiple GPUs), and GPU memory space allocated for intermediate
+    states (aka KV cache). Given a batch of prompts and sampling parameters,
+    this class generates texts from the model, using an intelligent batching
+    mechanism and efficient memory management.
+
+    NOTE: This class is intended to be used for offline inference. For online
+    serving, use the `AsyncLLMServer` class instead.
+    NOTE: For the comprehensive list of arguments, see `ServerArgs`.
+
+    Args:
+        model: The name or path of a HuggingFace Transformers model.
+        tensor_parallel_size: The number of GPUs to use for distributed
+            execution with tensor parallelism.
+        dtype: The data type for the model weights and activations. Currently,
+            we support `float16` and `bfloat16`. If `default`, we use the
+            `torch_dtype` attribute of the model config. If the `torch_dtype`
+            is `float32`, we use `float16` instead.
+        seed: The seed to initialize the random number generator for sampling.
+    """
 
     def __init__(
         self,
@@ -39,19 +61,50 @@ def get_tokenizer(
 
     def generate(
         self,
-        prompts: Union[str, List[str]],
+        prompts: Optional[Union[str, List[str]]] = None,
         sampling_params: Optional[SamplingParams] = None,
         prompt_token_ids: Optional[List[List[int]]] = None,
         use_tqdm: bool = True,
     ) -> List[RequestOutput]:
+        """Generates the completions for the input prompts.
+
+        NOTE: This class automatically batches the given prompts, considering
+        the memory constraint. For the best performance, put all of your prompts
+        into a single list and pass it to this method.
+
+        Args:
+            prompts: A list of prompts to generate completions for.
+            sampling_params: The sampling parameters for text generation. If
+                None, we use the default sampling parameters.
+            prompt_token_ids: A list of token IDs for the prompts. If None, we
+                use the tokenizer to convert the prompts to token IDs.
+            use_tqdm: Whether to use tqdm to display the progress bar.
+
+        Returns:
+            A list of `RequestOutput` objects containing the generated
+            completions in the same order as the input prompts.
+        """
+        if prompts is None and prompt_token_ids is None:
+            raise ValueError("Either prompts or prompt_token_ids must be "
+                             "provided.")
         if isinstance(prompts, str):
+            # Convert a single prompt to a list.
             prompts = [prompts]
+        if prompts is not None and prompt_token_ids is not None:
+            if len(prompts) != len(prompt_token_ids):
+                raise ValueError("The lengths of prompts and prompt_token_ids "
+                                 "must be the same.")
         if sampling_params is None:
+            # Use default sampling params.
            sampling_params = SamplingParams()
 
         # Add requests to the server.
-        for i in range(len(prompts)):
-            prompt = prompts[i]
+        if prompts is not None:
+            num_requests = len(prompts)
+        else:
+            num_requests = len(prompt_token_ids)
+        for i in range(num_requests):
+            prompt = prompts[i] if prompts is not None else None
             if prompt_token_ids is None:
                 token_ids = None
             else:

[Review thread attached to the `prompts:` line of the docstring above]

Member: Not related to this PR but just noticed: Should we make the prompts argument also optional? I thought if a user provides prompt_token_ids, they do not need to provide prompts. Is this correct?

Collaborator (author): Good catch. Fixed!

@@ -61,7 +114,7 @@ def generate(

     def _add_request(
         self,
-        prompt: str,
+        prompt: Optional[str],
         sampling_params: SamplingParams,
         prompt_token_ids: Optional[List[int]],
     ) -> None:
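
To make the documented API concrete, here is a hedged usage sketch covering both calling modes that the new docstrings and validation describe; the import path and model name are assumptions, not taken from this PR:

```python
from cacheflow import LLM, SamplingParams  # assumed public import path

# Hypothetical model choice, used only for illustration.
llm = LLM(model="facebook/opt-125m", seed=0)

# Mode 1: plain text prompts; the tokenizer converts them to token IDs,
# and omitting sampling_params falls back to SamplingParams() defaults.
outputs = llm.generate(prompts=["Hello, my name is",
                                "The capital of France is"])

# Mode 2: pre-tokenized prompts; `prompts` may now be omitted entirely.
outputs += llm.generate(prompt_token_ids=[[0, 1, 2, 3]],
                        sampling_params=SamplingParams(max_tokens=16))

# Passing neither argument, or two lists of different lengths, raises ValueError.
for output in outputs:
    print(output)  # each item is a RequestOutput
```
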
3 changes: 2 additions & 1 deletion cacheflow/server/llm_server.py
@@ -126,14 +126,15 @@ def from_server_args(cls, server_args: ServerArgs) -> "LLMServer":
     def add_request(
         self,
         request_id: str,
-        prompt: str,
+        prompt: Optional[str],
         sampling_params: SamplingParams,
         prompt_token_ids: Optional[List[int]] = None,
         arrival_time: Optional[float] = None,
     ) -> None:
         if arrival_time is None:
             arrival_time = time.time()
         if prompt_token_ids is None:
+            assert prompt is not None
             prompt_token_ids = self.tokenizer.encode(prompt)
 
         # Create the sequences.
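
On the server side, `prompt` becomes optional and the tokenizer fallback now asserts that a prompt exists when no token IDs are given. A hedged sketch of both call paths; the import locations, `ServerArgs` fields, and model name are assumptions inferred from the file paths in this PR:

```python
import time
import uuid

# Assumed import locations; the ServerArgs module name and fields are hypothetical.
from cacheflow import SamplingParams
from cacheflow.server.llm_server import LLMServer
from cacheflow.server.arg_utils import ServerArgs

server = LLMServer.from_server_args(ServerArgs(model="facebook/opt-125m"))

# With a text prompt, the server tokenizes it internally.
server.add_request(request_id=str(uuid.uuid4()),
                   prompt="Hello, world!",
                   sampling_params=SamplingParams(max_tokens=16))

# With pre-tokenized input, prompt may now be None: the tokenizer fallback
# (and its new assert) only runs when prompt_token_ids is None.
server.add_request(request_id=str(uuid.uuid4()),
                   prompt=None,
                   sampling_params=SamplingParams(max_tokens=16),
                   prompt_token_ids=[0, 1, 2, 3],
                   arrival_time=time.time())
```
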