[Model] Fix Phi-3.5-vision-instruct 'num_crops' issue #7710

Merged (6 commits, Aug 22, 2024)
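Editor's note: the root cause, visible in the diffs below, is that Phi-3.5-vision declares num_crops in its image processor configuration (preprocessor_config.json) rather than in config.json, so the old attribute lookup on the HF model config silently fell back to the default of 16. The PR threads the image processor config, a plain dict, through ModelConfig and InputContext and switches the Phi-3V code to a dict lookup. A minimal sketch of the failure mode (the num_crops value here is hypothetical):

from transformers import PretrainedConfig

text_cfg = PretrainedConfig()   # stand-in for a loaded config.json without num_crops
ip_cfg = {"num_crops": 4}       # hypothetical preprocessor_config.json contents

assert getattr(text_cfg, "num_crops", 16) == 16  # old lookup: silent fallback
assert ip_cfg.get("num_crops", 16) == 4          # new lookup: real value wins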
4 changes: 2 additions & 2 deletions docs/source/models/supported_models.rst
@@ -225,9 +225,9 @@ Multimodal Language Models
     - :code:`google/paligemma-3b-pt-224`, :code:`google/paligemma-3b-mix-224`, etc.
     -
   * - :code:`Phi3VForCausalLM`
-    - Phi-3-Vision
+    - Phi-3-Vision, Phi-3.5-Vision
     - Image
-    - :code:`microsoft/Phi-3-vision-128k-instruct`, etc.
+    - :code:`microsoft/Phi-3-vision-128k-instruct`, :code:`microsoft/Phi-3.5-vision-instruct`, etc.
     -
   * - :code:`MiniCPMV`
     - MiniCPM-V
2 changes: 1 addition & 1 deletion tests/models/test_phi3v.py
@@ -21,7 +21,7 @@
     "<|user|>\n<|image_1|>\nWhat is the season?<|end|>\n<|assistant|>\n",
 })
 
-models = ["microsoft/Phi-3-vision-128k-instruct"]
+models = ["microsoft/Phi-3.5-vision-instruct"]
 
 
 def vllm_to_hf_output(vllm_output: Tuple[List[int], str,
6 changes: 5 additions & 1 deletion vllm/config.py
@@ -13,7 +13,9 @@
 from vllm.model_executor.models import ModelRegistry
 from vllm.platforms import current_platform
 from vllm.tracing import is_otel_available, otel_import_error_traceback
-from vllm.transformers_utils.config import get_config, get_hf_text_config
+from vllm.transformers_utils.config import (get_config,
+                                            get_hf_image_processor_config,
+                                            get_hf_text_config)
 from vllm.utils import (STR_NOT_IMPL_ENC_DEC_CUDAGRAPH, GiB_bytes,
                         cuda_device_count_stateless, get_cpu_memory, is_cpu,
                         is_hip, is_neuron, is_openvino, is_xpu,
@@ -166,6 +168,8 @@ def __init__(
         self.hf_config = get_config(self.model, trust_remote_code, revision,
                                     code_revision, rope_scaling, rope_theta)
         self.hf_text_config = get_hf_text_config(self.hf_config)
+        self.hf_image_processor_config = get_hf_image_processor_config(
+            self.model, revision)
         self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype)
 
         # Choose a default enforce_eager value if the user did not specify
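Editor's note: fetching the image processor config once in ModelConfig.__init__ gives downstream code a cached plain dict, so per-request input processing never re-reads preprocessor_config.json. The new field holds exactly what transformers' helper parses from that file; a minimal check, assuming Hub access:

from transformers.models.auto.image_processing_auto import get_image_processor_config

# Raw dict parsed from preprocessor_config.json; no image processor class is built.
ip_cfg = get_image_processor_config("microsoft/Phi-3.5-vision-instruct")
print(sorted(ip_cfg))  # should include "num_crops" for this checkpoint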
11 changes: 9 additions & 2 deletions vllm/inputs/registry.py
@@ -2,8 +2,8 @@
 from array import array
 from collections import UserDict
 from dataclasses import dataclass
-from typing import (TYPE_CHECKING, Callable, Dict, Mapping, Optional, Protocol,
-                    Tuple, Type)
+from typing import (TYPE_CHECKING, Any, Callable, Dict, Mapping, Optional,
+                    Protocol, Tuple, Type)
 
 from torch import nn
 from transformers import PretrainedConfig
@@ -55,6 +55,13 @@ def get_hf_config(self, hf_config_type: Type[C] = PretrainedConfig) -> C:
 
         return hf_config
 
+    def get_hf_image_processor_config(self) -> Dict[str, Any]:
+        """
+        Get the HuggingFace image processor configuration of the model.
+        """
+
+        return self.model_config.hf_image_processor_config
+
 
 N = TypeVar("N", bound=Type[nn.Module])

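Editor's note: registered multimodal mappers receive an InputContext, so after this change they can read preprocessor settings without extra Hub round-trips. A hedged sketch; the helper name is mine, not vLLM's:

from vllm.inputs.registry import InputContext

def read_num_crops(ctx: InputContext) -> int:
    # Plain dict captured from preprocessor_config.json at engine start-up.
    ip_cfg = ctx.get_hf_image_processor_config()
    return ip_cfg.get("num_crops", 16)  # same default as phi3v.py below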
12 changes: 6 additions & 6 deletions vllm/model_executor/models/phi3v.py
@@ -15,8 +15,8 @@
 # limitations under the License.
 import re
 from functools import lru_cache
-from typing import (Iterable, List, Literal, Mapping, Optional, Tuple,
-                    TypedDict, Union)
+from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional,
+                    Tuple, TypedDict, Union)
 
 import numpy as np
 import torch
@@ -324,12 +324,12 @@ def _calc_hd_transform_size(*, width: int, height: int, hd_num: int = 16):
 
 # Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L181
 def get_phi3v_image_feature_size(
-    hf_config: PretrainedConfig,
+    hf_config: Dict[str, Any],
     *,
     input_height: int,
     input_width: int,
 ) -> int:
-    num_crops = getattr(hf_config, "num_crops", 16)
+    num_crops = hf_config.get("num_crops", 16)
     new_width, new_height = _calc_hd_transform_size(width=input_width,
                                                     height=input_height,
                                                     hd_num=num_crops)
@@ -341,7 +341,7 @@ def get_max_phi3v_image_tokens(ctx: InputContext):
 
     return get_phi3v_image_feature_size(
-        ctx.get_hf_config(),
+        ctx.get_hf_image_processor_config(),
         input_height=MAX_IMAGE_FEATURE_SIZE_HEIGHT,
         input_width=MAX_IMAGE_FEATURE_SIZE_WIDTH,
     )
@@ -395,7 +395,7 @@ def input_processor_for_phi3v(ctx: InputContext, llm_inputs: LLMInputs):
         return llm_inputs
 
     model_config = ctx.model_config
-    hf_config = ctx.get_hf_config()
+    hf_config = ctx.get_hf_image_processor_config()
 
     image_data = multi_modal_data["image"]
     if isinstance(image_data, Image.Image):
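Editor's note: num_crops caps the HD transform, which in turn sets the image placeholder token budget, so reading the wrong value mis-sizes every multimodal prompt. A hedged sketch against a vLLM tree that includes this PR (dict contents are illustrative):

from vllm.model_executor.models.phi3v import get_phi3v_image_feature_size

# Same image, two preprocessor configs: fewer crops -> smaller token budget.
small = get_phi3v_image_feature_size({"num_crops": 4},
                                     input_height=1344, input_width=1344)
large = get_phi3v_image_feature_size({"num_crops": 16},
                                     input_height=1344, input_width=1344)
print(small, large)  # expect small < large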
15 changes: 14 additions & 1 deletion vllm/transformers_utils/config.py
@@ -1,8 +1,10 @@
 import contextlib
 from pathlib import Path
-from typing import Dict, Optional, Type, Union
+from typing import Any, Dict, Optional, Type, Union
 
 from transformers import GenerationConfig, PretrainedConfig
+from transformers.models.auto.image_processing_auto import (
+    get_image_processor_config)
 from transformers.models.auto.modeling_auto import (
     MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
 
@@ -97,6 +99,17 @@ def get_config(
     return config
 
 
+def get_hf_image_processor_config(
+    model: Union[str, Path],
+    revision: Optional[str] = None,
+    **kwargs,
+) -> Dict[str, Any]:
+    # Separate model folder from file path for GGUF models
+    if Path(model).is_file() and Path(model).suffix == ".gguf":
+        model = Path(model).parent
+    return get_image_processor_config(model, revision=revision, **kwargs)
+
+
 def get_hf_text_config(config: PretrainedConfig):
     """Get the "sub" config relevant to llm for multi modal models.
     No op for pure text models.
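Editor's note: a quick end-to-end check of the new helper, assuming a vLLM build with this PR and Hub access. For a local GGUF file the helper first substitutes the file's parent directory so transformers can locate preprocessor_config.json beside the weights:

from vllm.transformers_utils.config import get_hf_image_processor_config

ip_cfg = get_hf_image_processor_config("microsoft/Phi-3.5-vision-instruct")
print(ip_cfg.get("num_crops"))  # the value phi3v.py now actually consumes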