
Commit 5871f39

DarkLight1337 authored and jimpang committed
[CI/Build] Add TP test for vision models (vllm-project#5892)
1 parent 7d508d8 commit 5871f39

9 files changed: +131 -27 lines

.buildkite/test-pipeline.yaml (+5)

@@ -44,6 +44,7 @@ steps:
   working_dir: "/vllm-workspace/tests"
   num_gpus: 2
   commands:
+  - bash ../.buildkite/download-images.sh
   # FIXIT: find out which code initialize cuda before running the test
   # before the fix, we need to use spawn to test it
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
@@ -52,10 +53,14 @@ steps:
   - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_basic_distributed_correctness.py
   - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_chunked_prefill_distributed.py
   - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_chunked_prefill_distributed.py
+  - TEST_DIST_MODEL=llava-hf/llava-1.5-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_multimodal_broadcast.py
+  - TEST_DIST_MODEL=microsoft/Phi-3-vision-128k-instruct DISTRIBUTED_EXECUTOR_BACKEND=ray pytest -v -s distributed/test_multimodal_broadcast.py
   - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
   - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_basic_distributed_correctness.py
   - TEST_DIST_MODEL=facebook/opt-125m DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_chunked_prefill_distributed.py
   - TEST_DIST_MODEL=meta-llama/Llama-2-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_chunked_prefill_distributed.py
+  - TEST_DIST_MODEL=llava-hf/llava-1.5-7b-hf DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_multimodal_broadcast.py
+  - TEST_DIST_MODEL=microsoft/Phi-3-vision-128k-instruct DISTRIBUTED_EXECUTOR_BACKEND=mp pytest -v -s distributed/test_multimodal_broadcast.py
   - pytest -v -s spec_decode/e2e/test_integration_dist_tp2.py
   - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s test_sharded_state_loader.py
   - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s distributed/test_utils.py
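
The new pipeline entries exercise the multimodal broadcast test under both the Ray and multiprocessing backends. As a rough local reproduction of one entry, a small wrapper like the following can be used. This is only a sketch: the repository-root working directory, the model choice, and the backend are assumptions mirroring the commands above, not part of this commit.

```python
# Hypothetical local runner mirroring one CI entry above; assumes it is
# executed from a vLLM checkout with the test dependencies installed.
import os
import subprocess

env = dict(
    os.environ,
    TEST_DIST_MODEL="llava-hf/llava-1.5-7b-hf",   # model under test
    DISTRIBUTED_EXECUTOR_BACKEND="ray",           # or "mp"
    VLLM_WORKER_MULTIPROC_METHOD="spawn",         # matches the export above
)

# Fetch the image assets, then run the distributed multimodal test.
subprocess.run(["bash", ".buildkite/download-images.sh"], check=True)
subprocess.run(
    ["pytest", "-v", "-s", "distributed/test_multimodal_broadcast.py"],
    cwd="tests",
    env=env,
    check=True,
)
```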
tests/distributed/test_multimodal_broadcast.py (+51, new file)

@@ -0,0 +1,51 @@
+"""Compare the outputs of HF and distributed vLLM when using greedy sampling.
+The second test will hang if more than one test is run per command, so we need
+to run the tests one by one. The solution is to pass arguments (model name) by
+environment variables.
+
+Run:
+```sh
+TEST_DIST_MODEL=llava-hf/llava-1.5-7b-hf \
+    test_multimodal_broadcast.py
+TEST_DIST_MODEL=microsoft/Phi-3-vision-128k-instruct \
+    test_multimodal_broadcast.py
+```
+"""
+import os
+
+import pytest
+
+from vllm.utils import cuda_device_count_stateless
+
+model = os.environ["TEST_DIST_MODEL"]
+
+if model.startswith("llava-hf/llava"):
+    from ..models.test_llava import model_and_vl_config, run_test
+elif model.startswith("microsoft/Phi-3-vision"):
+    from ..models.test_phi3v import model_and_vl_config, run_test
+else:
+    raise NotImplementedError(f"Unsupported model: {model}")
+
+
+@pytest.mark.parametrize("tensor_parallel_size", [2])
+@pytest.mark.parametrize("dtype", ["half"])
+@pytest.mark.parametrize("max_tokens", [128])
+def test_models(hf_runner, vllm_runner, image_assets,
+                tensor_parallel_size: int, dtype: str,
+                max_tokens: int) -> None:
+    if cuda_device_count_stateless() < tensor_parallel_size:
+        pytest.skip(
+            f"Need at least {tensor_parallel_size} GPUs to run the test.")
+
+    distributed_executor_backend = os.getenv("DISTRIBUTED_EXECUTOR_BACKEND")
+
+    run_test(
+        hf_runner,
+        vllm_runner,
+        image_assets,
+        model_and_config=model_and_vl_config[0],
+        dtype=dtype,
+        max_tokens=max_tokens,
+        tensor_parallel_size=tensor_parallel_size,
+        distributed_executor_backend=distributed_executor_backend,
+    )
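
The module selects the per-model `run_test` helper at import time from the `TEST_DIST_MODEL` environment variable, because running both models in a single pytest invocation hangs (see the docstring). The prefix-based dispatch pattern in isolation looks roughly like this; a standalone sketch with placeholder module strings, not code from the commit.

```python
# Standalone sketch of the prefix-based dispatch used above; the module
# paths are illustrative strings rather than real imports.
import os

_HELPERS = {
    "llava-hf/llava": "tests.models.test_llava",
    "microsoft/Phi-3-vision": "tests.models.test_phi3v",
}


def resolve_helper(model_name: str) -> str:
    """Return the module that provides `run_test` for the given model."""
    for prefix, module in _HELPERS.items():
        if model_name.startswith(prefix):
            return module
    raise NotImplementedError(f"Unsupported model: {model_name}")


if __name__ == "__main__":
    name = os.environ.get("TEST_DIST_MODEL", "llava-hf/llava-1.5-7b-hf")
    print(resolve_helper(name))
```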

tests/models/test_llava.py (+31 -8)

@@ -1,11 +1,11 @@
-from typing import List, Tuple
+from typing import List, Optional, Tuple, Type
 
 import pytest
 from transformers import AutoTokenizer
 
 from vllm.config import VisionLanguageConfig
 
-from ..conftest import IMAGE_ASSETS
+from ..conftest import IMAGE_ASSETS, HfRunner, VllmRunner, _ImageAssets
 
 pytestmark = pytest.mark.vlm
 
@@ -65,12 +65,17 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str],
     return hf_output_ids, hf_output_str
 
 
-# TODO: Add test for `tensor_parallel_size` [ref: PR #3883]
-@pytest.mark.parametrize("model_and_config", model_and_vl_config)
-@pytest.mark.parametrize("dtype", ["half"])
-@pytest.mark.parametrize("max_tokens", [128])
-def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
-                dtype: str, max_tokens: int) -> None:
+def run_test(
+    hf_runner: Type[HfRunner],
+    vllm_runner: Type[VllmRunner],
+    image_assets: _ImageAssets,
+    model_and_config: Tuple[str, VisionLanguageConfig],
+    *,
+    dtype: str,
+    max_tokens: int,
+    tensor_parallel_size: int,
+    distributed_executor_backend: Optional[str] = None,
+):
     """Inference result should be the same between hf and vllm.
 
     All the image fixtures for the test is under tests/images.
@@ -96,6 +101,8 @@ def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
 
     with vllm_runner(model_id,
                      dtype=dtype,
+                     tensor_parallel_size=tensor_parallel_size,
+                     distributed_executor_backend=distributed_executor_backend,
                      enforce_eager=True,
                      **vlm_config.as_cli_args_dict()) as vllm_model:
         vllm_outputs = vllm_model.generate_greedy(vllm_image_prompts,
@@ -110,3 +117,19 @@ def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
             f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}")
         assert hf_output_ids == vllm_output_ids, (
             f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}")
+
+
+@pytest.mark.parametrize("model_and_config", model_and_vl_config)
+@pytest.mark.parametrize("dtype", ["half"])
+@pytest.mark.parametrize("max_tokens", [128])
+def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
+                dtype: str, max_tokens: int) -> None:
+    run_test(
+        hf_runner,
+        vllm_runner,
+        image_assets,
+        model_and_config,
+        dtype=dtype,
+        max_tokens=max_tokens,
+        tensor_parallel_size=1,
+    )

tests/models/test_phi3v.py (+36 -13)

@@ -1,12 +1,12 @@
-from typing import List, Tuple
+from typing import List, Optional, Tuple, Type
 
 import pytest
 from transformers import AutoTokenizer
 
 from vllm.config import VisionLanguageConfig
 from vllm.utils import is_cpu
 
-from ..conftest import IMAGE_ASSETS
+from ..conftest import IMAGE_ASSETS, HfRunner, VllmRunner, _ImageAssets
 
 pytestmark = pytest.mark.vlm
 
@@ -73,17 +73,17 @@ def vllm_to_hf_output(vllm_output: Tuple[List[int], str],
 target_dtype = "bfloat16"
 
 
-# TODO: Add test for `tensor_parallel_size` [ref: PR #3883]
-# Since we use _attn_implementation="eager" for hf_runner, here is
-# numeric difference for longer context and test can't pass
-@pytest.mark.xfail(
-    reason="Inconsistent image processor being used due to lack "
-    "of support for dynamic image token replacement")
-@pytest.mark.parametrize("model_and_config", model_and_vl_config)
-@pytest.mark.parametrize("dtype", [target_dtype])
-@pytest.mark.parametrize("max_tokens", [128])
-def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
-                dtype: str, max_tokens: int) -> None:
+def run_test(
+    hf_runner: Type[HfRunner],
+    vllm_runner: Type[VllmRunner],
+    image_assets: _ImageAssets,
+    model_and_config: Tuple[str, VisionLanguageConfig],
+    *,
+    dtype: str,
+    max_tokens: int,
+    tensor_parallel_size: int,
+    distributed_executor_backend: Optional[str] = None,
+):
     """Inference result should be the same between hf and vllm.
 
     All the image fixtures for the test is under tests/images.
@@ -116,7 +116,9 @@ def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
     with vllm_runner(model_id,
                      max_model_len=2048,
                      dtype=dtype,
+                     tensor_parallel_size=tensor_parallel_size,
                      enforce_eager=True,
+                     distributed_executor_backend=distributed_executor_backend,
                      **vlm_config.as_cli_args_dict()) as vllm_model:
         vllm_outputs = vllm_model.generate_greedy(vllm_image_prompts,
                                                   max_tokens,
@@ -130,3 +132,24 @@ def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
             f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}")
         assert hf_output_ids == vllm_output_ids, (
             f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}")
+
+
+# Since we use _attn_implementation="eager" for hf_runner, here is
+# numeric difference for longer context and test can't pass
+@pytest.mark.xfail(
+    reason="Inconsistent image processor being used due to lack "
+    "of support for dynamic image token replacement")
+@pytest.mark.parametrize("model_and_config", model_and_vl_config)
+@pytest.mark.parametrize("dtype", [target_dtype])
+@pytest.mark.parametrize("max_tokens", [128])
+def test_models(hf_runner, vllm_runner, image_assets, model_and_config,
+                dtype: str, max_tokens: int) -> None:
+    run_test(
+        hf_runner,
+        vllm_runner,
+        image_assets,
+        model_and_config,
+        dtype=dtype,
+        max_tokens=max_tokens,
+        tensor_parallel_size=1,
+    )

vllm/distributed/device_communicators/shm_broadcast.py (+1)

@@ -268,6 +268,7 @@ def broadcast_object(self, obj=None):
         else:
             return self.dequeue()
 
+    @staticmethod
     def create_from_process_group(pg: ProcessGroup,
                                   max_chunk_bytes,
                                   max_chunks,
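
`create_from_process_group` has no `self` parameter, so marking it as a `@staticmethod` makes calls such as `ShmRingBufferIO.create_from_process_group(...)` in `parallel_state.py` behave the same whether made on the class or on an instance. A standalone sketch of the semantics, using a hypothetical `Factory` class rather than vLLM code:

```python
# Without @staticmethod, calling a self-less factory on an *instance* would
# implicitly pass that instance as the first positional argument; with the
# decorator, class-level and instance-level calls behave identically.
class Factory:
    @staticmethod
    def create(group, max_chunk_bytes, max_chunks):
        return (group, max_chunk_bytes, max_chunks)


assert Factory.create("cpu_group", 1 << 22, 6) == ("cpu_group", 1 << 22, 6)
assert Factory().create("cpu_group", 1 << 22, 6) == ("cpu_group", 1 << 22, 6)
```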

vllm/distributed/parallel_state.py (+3 -1)

@@ -194,7 +194,7 @@ def __init__(
         self.shm_broadcaster: Optional[ShmRingBufferIO] = None
         if self.world_size > 1 and is_in_the_same_node(self.cpu_group):
             self.shm_broadcaster = ShmRingBufferIO.create_from_process_group(
-                self.cpu_group, 1 << 20, 6)
+                self.cpu_group, 1 << 22, 6)
 
     @property
     def first_rank(self):
@@ -690,6 +690,8 @@ def destroy(self):
             self.pynccl_comm = None
         if self.ca_comm is not None:
             self.ca_comm = None
+        if self.shm_broadcaster is not None:
+            self.shm_broadcaster = None
 
 
 _WORLD: Optional[GroupCoordinator] = None
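
Given the `create_from_process_group(pg, max_chunk_bytes, max_chunks, ...)` signature shown above, the constant change grows each shared-memory chunk from 1 MiB to 4 MiB, so the six-chunk ring buffer grows from 6 MiB to 24 MiB, presumably to fit the larger payloads broadcast for vision inputs. The arithmetic, as a quick check:

```python
# Plain arithmetic on the constants changed above.
old_chunk_bytes, new_chunk_bytes, max_chunks = 1 << 20, 1 << 22, 6

assert old_chunk_bytes == 1 * 1024**2   # 1 MiB per chunk before
assert new_chunk_bytes == 4 * 1024**2   # 4 MiB per chunk after

print(f"ring buffer before: {old_chunk_bytes * max_chunks / 2**20:.0f} MiB")  # 6 MiB
print(f"ring buffer after:  {new_chunk_bytes * max_chunks / 2**20:.0f} MiB")  # 24 MiB
```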

vllm/model_executor/models/llava.py (+1 -1)

@@ -219,7 +219,7 @@ def _image_pixels_to_features(self, vision_tower: CLIPVisionModel,
 
         # NOTE: we skip the step to select the vision feature layer since
         # this is already done inside the vision tower
-        image_features = vision_tower(pixel_values.to(vision_tower.device),
+        image_features = vision_tower(pixel_values,
                                       self.config.vision_feature_layer)
 
         return self._select_image_features(

vllm/model_executor/models/llava_next.py (+1 -1)

@@ -301,7 +301,7 @@ def _image_pixels_to_features(self, vision_tower: CLIPVisionModel,
 
         # NOTE: we skip the step to select the vision feature layer since
         # this is already done inside the vision tower
-        image_features = vision_tower(pixel_values.to(vision_tower.device),
+        image_features = vision_tower(pixel_values,
                                       self.config.vision_feature_layer)
 
         return self._select_image_features(

vllm/model_executor/models/phi3v.py (+2 -3)

@@ -157,7 +157,6 @@ def forward(self, input_ids: torch.LongTensor,
 
         select = False
 
-        target_device = self.img_projection[0].bias.device
         target_dtype = self.img_projection[0].bias.dtype
 
         if len(positions.tolist()) > 0:
@@ -231,7 +230,7 @@ def forward(self, input_ids: torch.LongTensor,
             img_set_tensor = []
             for _output_img in output_imgs:
                 img_feature_proj = self.img_projection(
-                    _output_img.to(target_device, target_dtype))
+                    _output_img.to(target_dtype))
                 img_set_tensor.append(img_feature_proj)
             select = True
 
@@ -245,7 +244,7 @@ def forward(self, input_ids: torch.LongTensor,
                 hidden_states[positions[idx, 0],
                               positions[idx, 1]:positions[idx, 1] +
                               cnt] = (img_set_tensor[i].to(
-                                  hidden_states.device, hidden_states.dtype))
+                                  hidden_states.dtype))
                 idx += cnt
 
         return hidden_states.squeeze(0)
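
The three model-file edits above (llava.py, llava_next.py, phi3v.py) all drop explicit device moves and keep only dtype casts, presumably because under tensor parallelism each worker's inputs already live on its own GPU. A minimal CPU-only sketch of the generic PyTorch behavior being relied on, not vLLM code:

```python
import torch

# Tensor.to(dtype) changes only the dtype and leaves the tensor on whatever
# device it already occupies, so an explicit device argument is redundant
# when inputs are already placed on the correct device per worker.
pixel_values = torch.zeros(1, 3, 4, 4)           # stand-in for image inputs
converted = pixel_values.to(torch.float16)       # dtype-only conversion

assert converted.device == pixel_values.device   # device is unchanged
assert converted.dtype == torch.float16
```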
