Commit 4caf704

Include tokens from prompt phase in counter_generation_tokens (#2802)
1 parent 6f32cdd

File tree

3 files changed: +39 -1 lines changed

.buildkite/test-pipeline.yaml
tests/metrics/test_metrics.py
vllm/engine/llm_engine.py

.buildkite/test-pipeline.yaml  (+3)

@@ -52,6 +52,9 @@ steps:
 - label: LoRA Test
   command: pytest -v -s lora
 
+- label: Metrics Test
+  command: pytest -v -s metrics
+
 - label: Benchmarks
   working_dir: "/vllm-workspace/.buildkite"
   commands:
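
To reproduce the new "Metrics Test" CI step locally, the sketch below drives pytest the same way the pipeline command does. It assumes, as the neighbouring steps suggest, that it is run from vLLM's tests/ directory; the script itself is illustrative and not part of this commit.

# Minimal local equivalent of the new CI step (illustrative, not from this
# commit). Run from vLLM's tests/ directory so the "metrics" path resolves,
# mirroring `pytest -v -s metrics` in the Buildkite pipeline.
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["-v", "-s", "metrics"]))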

tests/metrics/test_metrics.py  (+33 -1)

@@ -9,13 +9,16 @@
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["float"])
 @pytest.mark.parametrize("max_tokens", [128])
-def test_metrics(
+def test_metric_counter_prompt_tokens(
     vllm_runner,
     example_prompts,
     model: str,
     dtype: str,
     max_tokens: int,
 ) -> None:
+    # Reset metric
+    vllm.engine.metrics.counter_prompt_tokens.set_value({}, 0)
+
     vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
     tokenizer = vllm_model.model.get_tokenizer()
     prompt_token_counts = [len(tokenizer.encode(p)) for p in example_prompts]
@@ -31,3 +34,32 @@ def test_metrics(
     assert vllm_prompt_token_count == metric_count, (
         f"prompt token count: {vllm_prompt_token_count!r}\nmetric: {metric_count!r}"
     )
+
+
+@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("dtype", ["float"])
+@pytest.mark.parametrize("max_tokens", [128])
+def test_metric_counter_generation_tokens(
+    vllm_runner,
+    example_prompts,
+    model: str,
+    dtype: str,
+    max_tokens: int,
+) -> None:
+    # Reset metric
+    vllm.engine.metrics.counter_generation_tokens.set_value({}, 0)
+
+    vllm_model = vllm_runner(model, dtype=dtype, disable_log_stats=False)
+    vllm_outputs = vllm_model.generate_greedy(example_prompts, max_tokens)
+    tokenizer = vllm_model.model.get_tokenizer()
+    metric_count = vllm.engine.metrics.counter_generation_tokens.get_value({})
+    vllm_generation_count = 0
+    for i in range(len(example_prompts)):
+        vllm_output_ids, vllm_output_str = vllm_outputs[i]
+        prompt_ids = tokenizer.encode(example_prompts[i])
+        # vllm_output_ids contains both prompt tokens and generation tokens. We're interested only in the count of the generation tokens.
+        vllm_generation_count += len(vllm_output_ids) - len(prompt_ids)
+
+    assert vllm_generation_count == metric_count, (
+        f"generation token count: {vllm_generation_count!r}\nmetric: {metric_count!r}"
+    )
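
The arithmetic the new test relies on can be illustrated in isolation. The snippet below is a toy sketch with made-up token ids (not from this commit): the runner's output ids contain the prompt followed by the generated tokens, so the per-prompt generation count is simply the length difference.

from typing import List


def generated_token_count(prompt_ids: List[int], output_ids: List[int]) -> int:
    # output_ids = prompt tokens followed by generated tokens, so the
    # generated portion is whatever extends past the prompt.
    assert output_ids[:len(prompt_ids)] == prompt_ids
    return len(output_ids) - len(prompt_ids)


# Toy example with made-up token ids: 3 prompt tokens, 3 generated tokens.
prompt = [101, 7592, 2088]
output = prompt + [2023, 2003, 102]
assert generated_token_count(prompt, output) == 3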

vllm/engine/llm_engine.py  (+3)

@@ -872,6 +872,9 @@ def _get_stats(self,
                 num_prompt_tokens = sum(
                     len(seq_group.prompt_token_ids)
                     for seq_group in scheduler_outputs.scheduled_seq_groups)
+                num_generation_tokens = sum(
+                    seq_group.num_seqs()
+                    for seq_group in scheduler_outputs.scheduled_seq_groups)
             else:
                 num_generation_tokens = scheduler_outputs.num_batched_tokens
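
For context, here is a standalone sketch of the accounting this hunk introduces (the class and function names below are hypothetical stand-ins, not vLLM APIs): during a prompt run each scheduled sequence also emits its first output token, so the generation counter advances by the number of sequences, while decode runs keep using the batched-token count.

from dataclasses import dataclass
from typing import List, Tuple


@dataclass
class ToySeqGroup:
    # Hypothetical stand-in for a scheduled sequence group.
    prompt_token_ids: List[int]
    num_seqs: int


def count_iteration_tokens(prompt_run: bool,
                           scheduled: List[ToySeqGroup],
                           num_batched_tokens: int) -> Tuple[int, int]:
    """Mirror the prompt/decode split used by _get_stats() above."""
    num_prompt_tokens = 0
    if prompt_run:
        # A prompt run processes every prompt token...
        num_prompt_tokens = sum(len(g.prompt_token_ids) for g in scheduled)
        # ...and also produces the first generated token of each sequence,
        # which is what this commit starts adding to counter_generation_tokens.
        num_generation_tokens = sum(g.num_seqs for g in scheduled)
    else:
        # A decode run only produces generated tokens.
        num_generation_tokens = num_batched_tokens
    return num_prompt_tokens, num_generation_tokens


# Example: one prompt run with two groups of one sequence each.
groups = [ToySeqGroup([1, 2, 3], 1), ToySeqGroup([4, 5], 1)]
assert count_iteration_tokens(True, groups, 0) == (5, 2)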
