Add image tag standardization, multimodal qwen tests
Signed-off-by: Alex-Brooks <Alex.Brooks@ibm.com>
alex-jw-brooks committed Sep 1, 2024
1 parent 27f819a commit fcdd6f1
Showing 3 changed files with 201 additions and 19 deletions.
4 changes: 4 additions & 0 deletions examples/offline_inference_vision_language.py
@@ -163,6 +163,10 @@ def run_blip2(question):
def run_qwen_vl(question):

llm = LLM(model="Qwen/Qwen-VL", trust_remote_code=True)
# NOTE: In this case, we could pass either '<image>' or
# 'Picture {idx}: <img></img>'; currently <image> tags get
# unified and resolved to the corresponding indices as part
# of the Qwen model input processor.
prompt = f"{question}<image>"
stop_token_ids = None
return llm, prompt, stop_token_ids
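As a quick sanity check on the note above, the shorthand '<image>' tag and the explicit, manually numbered Qwen-style tag should produce identical token ids after input processing. The sketch below mirrors the tag-unification test added in this commit and assumes only the get_qwen_llm_inputs helper introduced in the diff further down; it is an illustration, not part of the example script.

from transformers import AutoTokenizer

from vllm.model_executor.models.qwen import get_qwen_llm_inputs

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL",
                                          trust_remote_code=True)

# Shorthand placeholder; the input processor numbers it for us.
unified_ids = get_qwen_llm_inputs("<image>",
                                  tokenizer,
                                  num_images=1,
                                  multi_modal_data=None)["prompt_token_ids"]

# Explicit, manually numbered Qwen-style tag.
explicit_ids = tokenizer.encode("Picture 1: <img></img>\n")

assert unified_ids == explicit_ids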
177 changes: 164 additions & 13 deletions tests/models/test_qwen.py
@@ -1,18 +1,170 @@
from typing import Type
import pathlib
from typing import List, Optional, Type

import pytest
from transformers import AutoTokenizer

from ..conftest import HfRunner, VllmRunner
from vllm.model_executor.models.qwen import get_qwen_llm_inputs
from vllm.multimodal.utils import rescale_image_size

from ..conftest import IMAGE_ASSETS, HfRunner, VllmRunner, _ImageAssets
from .utils import check_logprobs_close

pytestmark = pytest.mark.vlm

text_only_models = [
"Qwen/Qwen-7B-Chat" # Has no visual component
]

multimodal_models = ["Qwen/Qwen-VL"]

HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
"stop_sign":
"Picture 1: <img></img>\nWhat's the content of the image?: ",
"cherry_blossom":
"Picture 1: <img></img>\nWhat is the season?: ",
})


### Tests for multimodal Qwen models
@pytest.mark.parametrize("hf_input_text,vllm_input_text,num_images", [
("I have no image tags", "I have no image tags", 0),
("Picture 1: <img></img>\n", "Picture 1: <img></img>\n", 1),
("Picture 1: <img></img>\n", "<image>", 1),
("Picture 1: <img></img>\n Picture 2: <img></img>\n", "<image> <image>",
2),
])
def test_qwen_input_processor_tag_unification(hf_input_text, vllm_input_text,
num_images):
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-VL",
trust_remote_code=True)
hf_tok_ids = tokenizer.encode(hf_input_text)
vllm_tok_ids = get_qwen_llm_inputs(
vllm_input_text,
tokenizer,
num_images,
multi_modal_data=None,
)["prompt_token_ids"]
assert len(vllm_tok_ids) == len(hf_tok_ids)
assert vllm_tok_ids == hf_tok_ids


def run_test(
tmp_path: pathlib.PosixPath,
hf_runner: Type[HfRunner],
vllm_runner: Type[VllmRunner],
image_assets: _ImageAssets,
model: str,
*,
size_factors: List[float],
dtype: str,
max_tokens: int,
num_logprobs: int,
tensor_parallel_size: int,
distributed_executor_backend: Optional[str] = None,
):
"""Inference result should be the same between hf and vllm.
All the image fixtures for the test is under tests/images.
For huggingface runner, we provide the PIL images as input.
For vllm runner, we provide MultiModalDataDict objects
and corresponding MultiModalConfig as input.
Note, the text input is also adjusted to abide by vllm contract.
The text output is sanitized to be able to compare with hf.
"""
images = [asset.pil_image for asset in image_assets]

# Export the images to a tempdir and substitute their paths into the hf
# prompts; the contents between <img>/</img> are ignored by vLLM, but the
# transformers implementation of the visual transformer parses them in the
# forward call to reload the image, treating the contents as a URL or a
# local path.
for idx, asset in enumerate(image_assets):
image_tmp_path = tmp_path / f"{asset.name}.jpg"
asset.pil_image.save(image_tmp_path)
HF_IMAGE_PROMPTS[idx] = HF_IMAGE_PROMPTS[idx].replace(
"<img></img>", f"<img>{image_tmp_path}</img>")

inputs_per_image = [(
[prompt for _ in size_factors],
[rescale_image_size(image, factor) for factor in size_factors],
) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]

# NOTE: take care of the order. run vLLM first, and then run HF.
# vLLM needs a fresh new process without cuda initialization.
# if we run HF first, the cuda initialization will be done and it
# will hurt multiprocessing backend with fork method (the default method).

# max_model_len should be greater than image_feature_size
with vllm_runner(model,
max_model_len=2048,
dtype=dtype,
tensor_parallel_size=tensor_parallel_size,
distributed_executor_backend=distributed_executor_backend,
enforce_eager=True) as vllm_model:
vllm_outputs_per_image = [
vllm_model.generate_greedy_logprobs(prompts,
max_tokens,
num_logprobs=num_logprobs,
images=images)
for prompts, images in inputs_per_image
]

with hf_runner(model, dtype=dtype) as hf_model:
hf_outputs_per_image = [
hf_model.generate_greedy_logprobs_limit(prompts,
max_tokens,
num_logprobs=num_logprobs,
images=images)
for prompts, images in inputs_per_image
]

for hf_outputs, vllm_outputs in zip(hf_outputs_per_image,
vllm_outputs_per_image):

check_logprobs_close(
outputs_0_lst=hf_outputs,
outputs_1_lst=vllm_outputs,
name_0="hf",
name_1="vllm",
)


@pytest.mark.parametrize("model", multimodal_models)
@pytest.mark.parametrize(
"size_factors",
[
# No image
[],
# Single-scale
[1.0],
# Single-scale, batched
[1.0, 1.0, 1.0],
# Multi-scale
[0.25, 0.5, 1.0],
],
)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [8])
@pytest.mark.parametrize("num_logprobs", [5])
def test_multimodal_models(tmp_path, hf_runner, vllm_runner, image_assets,
model, size_factors, dtype, max_tokens,
num_logprobs) -> None:
run_test(
tmp_path,
hf_runner,
vllm_runner,
image_assets,
model,
size_factors=size_factors,
dtype=dtype,
max_tokens=max_tokens,
num_logprobs=num_logprobs,
tensor_parallel_size=1,
)


### Tests for language-only Qwen models
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("num_logprobs", [5])
@@ -27,19 +179,18 @@ def test_text_only_qwen_model(
max_tokens: int,
num_logprobs: int,
):
# This test checks language inputs only, since the visual component
# for qwen-vl is still unsupported in VLLM. In the near-future, the
# implementation and this test will be extended to consider
# visual inputs as well.
with hf_runner(model, dtype=dtype) as hf_model:
hf_outputs = hf_model.generate_greedy_logprobs_limit(
# the primary purpose of this test is to ensure that we can
# load Qwen models, e.g., Qwen/Qwen-7B-Chat, that do not have a visual
# config, without any problems.
with vllm_runner(model, dtype=dtype) as vllm_model:
vllm_outputs = vllm_model.generate_greedy_logprobs(
example_prompts,
max_tokens,
num_logprobs=num_logprobs,
)

with vllm_runner(model, dtype=dtype) as vllm_model:
vllm_outputs = vllm_model.generate_greedy_logprobs(
with hf_runner(model, dtype=dtype) as hf_model:
hf_outputs = hf_model.generate_greedy_logprobs_limit(
example_prompts,
max_tokens,
num_logprobs=num_logprobs,
39 changes: 33 additions & 6 deletions vllm/model_executor/models/qwen.py
@@ -6,6 +6,7 @@
"""Inference-only QWen model compatible with HuggingFace weights."""

import math
import re
from array import array
from collections import OrderedDict
from functools import partial
@@ -20,7 +21,7 @@
from torch.nn.init import trunc_normal_
from torchvision import transforms
from torchvision.transforms import InterpolationMode
from transformers import PretrainedConfig
from transformers import PretrainedConfig, PreTrainedTokenizer

from vllm.attention import Attention, AttentionMetadata
from vllm.config import CacheConfig, MultiModalConfig
@@ -42,7 +43,7 @@
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.models.interfaces import SupportsMultiModal
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalDataDict
from vllm.multimodal.base import MultiModalInputs
from vllm.multimodal.utils import cached_get_tokenizer
from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors,
@@ -800,13 +801,39 @@ def input_processor_for_qwen(ctx: InputContext, llm_inputs: LLMInputs):
if prompt is None:
prompt = tokenizer.decode(prompt_token_ids)

# Iteratively replace image tags for every image that we expect
# Currently we only allow multiple images input as embeddings.
num_img_tags = prompt.count("<image>")
return get_qwen_llm_inputs(prompt, tokenizer, num_images, multi_modal_data)


def get_qwen_llm_inputs(
prompt: str, tokenizer: PreTrainedTokenizer, num_images: int,
multi_modal_data: Optional[MultiModalDataDict]) -> LLMInputs:
"""Standardize the image token format. Qwen generally expects images
to be formatted matching the regex below, but currently, we also let
users pass <image>. This offers a couple benefits.
1. Usually the picture numbering is automatically done by the tokenizer
utils when converting from a list format. Expecting users to do it
correctly when they may not have the tokenizer on the client side is
error-prone, e.g., users may accidentally 0-index their images, which
can cause weird results
2. Chat can use this to encode images for Qwen without having to consider
image indices at the moment.
Args:
prompt: Prompt whose image tags will be standardized.
tokenizer: Qwen tokenizer for this model.
num_images: Number of images passed in the multimodal data.
multi_modal_data: Multimodal data for this request.
Returns:
LLM data to be returned by the input processor.
"""
prompt = re.sub(r"Picture \d*: <img>.+?</img>", "<image>", prompt)
num_img_tags = prompt.count("<image>")
if num_img_tags != num_images:
logger.warning(
"Number of <image> tokens does not match the number of images")
"Number of image placeholders does not match the number of images")

# Only replace as many image tags as we are going to be able to process
# correctly. Sequentially replace image tags; padding shenanigans are
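For reference, here is a minimal sketch of the standardization step above, assuming only the regex shown in this hunk (pad-token insertion for the visual features and re-numbering of the placeholders are omitted):

import re

raw = ("Picture 1: <img>/tmp/stop_sign.jpg</img>\n"
       "What's the content of the image?")

# Collapse the explicit Qwen-style tag into the generic placeholder. The path
# between <img>/</img> is dropped here; vLLM receives the actual image via
# multi_modal_data rather than from the prompt text.
unified = re.sub(r"Picture \d*: <img>.+?</img>", "<image>", raw)

assert unified == "<image>\nWhat's the content of the image?"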
