Include tokens from prompt phase in counter_generation_tokens (#2802)
ronensc authored Feb 22, 2024
1 parent 6f32cdd commit 4caf704
Showing 3 changed files with 39 additions and 1 deletion.
3 changes: 3 additions & 0 deletions .buildkite/test-pipeline.yaml
@@ -52,6 +52,9 @@ steps:
 - label: LoRA Test
   command: pytest -v -s lora
 
+- label: Metrics Test
+  command: pytest -v -s metrics
+
 - label: Benchmarks
   working_dir: "/vllm-workspace/.buildkite"
   commands:
34 changes: 33 additions & 1 deletion tests/metrics/test_metrics.py
@@ -9,13 +9,16 @@
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["float"])
 @pytest.mark.parametrize("max_tokens", [128])
-def test_metrics(
+def test_metric_counter_prompt_tokens(
     vllm_runner,
     example_prompts,
     model: str,
     dtype: str,
     max_tokens: int,
 ) -> None:
+    # Reset metric
+    vllm.engine.metrics.counter_prompt_tokens.set_value({}, 0)
+
     vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
     tokenizer = vllm_model.model.get_tokenizer()
     prompt_token_counts = [len(tokenizer.encode(p)) for p in example_prompts]
@@ -31,3 +34,32 @@ def test_metrics(
     assert vllm_prompt_token_count == metric_count, (
         f"prompt token count: {vllm_prompt_token_count!r}\nmetric: {metric_count!r}"
     )
+
+
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("dtype", ["float"])
+@pytest.mark.parametrize("max_tokens", [128])
+def test_metric_counter_generation_tokens(
+    vllm_runner,
+    example_prompts,
+    model: str,
+    dtype: str,
+    max_tokens: int,
+) -> None:
+    # Reset metric
+    vllm.engine.metrics.counter_generation_tokens.set_value({}, 0)
+
+    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
+    tokenizer = vllm_model.model.get_tokenizer()
+    metric_count = vllm.engine.metrics.counter_generation_tokens.get_value({})
+    vllm_generation_count = 0
+    for i in range(len(example_prompts)):
+        vllm_output_ids, vllm_output_str = vllm_outputs[i]
+        prompt_ids = tokenizer.encode(example_prompts[i])
+        # vllm_output_ids contains both prompt tokens and generation tokens. We're interested only in the count of the generation tokens.
+        vllm_generation_count += len(vllm_output_ids) - len(prompt_ids)
+
+    assert vllm_generation_count == metric_count, (
+        f"generation token count: {vllm_generation_count!r}\nmetric: {metric_count!r}"
+    )
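For readers skimming the diff, here is a minimal standalone sketch (illustrative token ids only, not taken from the commit) of the counting identity the new test relies on: the output ids returned for each prompt contain the prompt tokens followed by the generated tokens, so the number of generated tokens is the difference in lengths.

# Illustrative sketch with made-up token ids; mirrors the subtraction done in the test above.
prompt_ids = [101, 2023, 2003, 1037, 3231, 102]   # 6 prompt tokens (hypothetical values)
generated_ids = [2157, 2428, 2307, 999]           # 4 generated tokens (hypothetical values)
output_ids = prompt_ids + generated_ids           # the output ids include the prompt
num_generated = len(output_ids) - len(prompt_ids)
assert num_generated == len(generated_ids)        # == 4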
3 changes: 3 additions & 0 deletions vllm/engine/llm_engine.py
@@ -872,6 +872,9 @@ def _get_stats(self,
             num_prompt_tokens = sum(
                 len(seq_group.prompt_token_ids)
                 for seq_group in scheduler_outputs.scheduled_seq_groups)
+            num_generation_tokens = sum(
+                seq_group.num_seqs()
+                for seq_group in scheduler_outputs.scheduled_seq_groups)
         else:
             num_generation_tokens = scheduler_outputs.num_batched_tokens
 
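A minimal standalone sketch of the counting rule the hunk above implements, written with made-up stand-in classes rather than vLLM's real SequenceGroup and SchedulerOutputs types: during a prefill (prompt_run) iteration each scheduled sequence emits its first generated token, so the generation counter should advance by the number of sequences; during a decode iteration every batched token is a generated token.

from dataclasses import dataclass
from typing import List, Tuple

# Stand-in types that mirror only the attributes used in the hunk above;
# they are NOT vLLM's real SequenceGroup / SchedulerOutputs classes.
@dataclass
class FakeSeqGroup:
    prompt_token_ids: List[int]
    _num_seqs: int

    def num_seqs(self) -> int:
        return self._num_seqs

@dataclass
class FakeSchedulerOutputs:
    scheduled_seq_groups: List[FakeSeqGroup]
    prompt_run: bool
    num_batched_tokens: int

def count_tokens(outputs: FakeSchedulerOutputs) -> Tuple[int, int]:
    # Mirrors the logic in the diff: on a prefill iteration, prompt tokens are
    # summed and each scheduled sequence contributes one newly generated token;
    # on a decode iteration, every batched token is a generated token.
    if outputs.prompt_run:
        num_prompt_tokens = sum(
            len(g.prompt_token_ids) for g in outputs.scheduled_seq_groups)
        num_generation_tokens = sum(
            g.num_seqs() for g in outputs.scheduled_seq_groups)
    else:
        num_prompt_tokens = 0
        num_generation_tokens = outputs.num_batched_tokens
    return num_prompt_tokens, num_generation_tokens

# Prefill: two groups with 5 and 3 prompt tokens, one sequence each.
prefill = FakeSchedulerOutputs(
    scheduled_seq_groups=[FakeSeqGroup([0] * 5, 1), FakeSeqGroup([0] * 3, 1)],
    prompt_run=True,
    num_batched_tokens=8,
)
assert count_tokens(prefill) == (8, 2)  # 8 prompt tokens, 2 first generated tokens

# Decode: the same two sequences each produce one more token.
decode = FakeSchedulerOutputs(
    scheduled_seq_groups=[FakeSeqGroup([0] * 5, 1), FakeSeqGroup([0] * 3, 1)],
    prompt_run=False,
    num_batched_tokens=2,
)
assert count_tokens(decode) == (0, 2)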