From 874a581f2d088809f7c7bb77e440a8f0bc170101 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Mon, 8 Apr 2024 14:44:29 +0000 Subject: [PATCH 01/45] Add basic support for OpenAI image input API - Refactor `OpenAIServingChat` and add function for loading image - Move `pillow` dev dependency to common - Add example chat template for LLaVA model --- examples/template_llava.jinja | 11 ++ requirements-common.txt | 1 + requirements-dev.txt | 3 - vllm/entrypoints/openai/protocol.py | 57 +++++++- vllm/entrypoints/openai/serving_chat.py | 179 ++++++++++++++++++++---- vllm/utils.py | 27 ++++ 6 files changed, 244 insertions(+), 34 deletions(-) create mode 100644 examples/template_llava.jinja diff --git a/examples/template_llava.jinja b/examples/template_llava.jinja new file mode 100644 index 000000000000..16b385465e69 --- /dev/null +++ b/examples/template_llava.jinja @@ -0,0 +1,11 @@ +{%- for message in messages -%} + {{ message['role'].upper() + ': ' + message['content'] }} + {%- if (loop.last and add_generation_prompt) or not loop.last -%} + {{- '\n' -}} + {%- endif -%} +{%- endfor -%} + + +{%- if add_generation_prompt and messages[-1]['role'] != 'assistant' -%} + {{- 'ASSISTANT:' -}} +{% endif %} \ No newline at end of file diff --git a/requirements-common.txt b/requirements-common.txt index ff053388a23e..cb766b2fe3c2 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -9,6 +9,7 @@ transformers >= 4.39.1 # Required for StarCoder2 & Llava. fastapi uvicorn[standard] pydantic >= 2.0 # Required for OpenAI server. +pillow # Required for OpenAI image input prometheus_client >= 0.18.0 tiktoken == 0.6.0 # Required for DBRX tokenizer outlines == 0.0.34 # Requires torch >= 2.1.0 \ No newline at end of file diff --git a/requirements-dev.txt b/requirements-dev.txt index 75d22bbdb2a1..b806c924a4e0 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -29,6 +29,3 @@ ai2-olmo # required for OLMo # Benchmarking aiohttp - -# Multimodal -pillow diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index f94d22d279cc..b06d54ce94cf 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -59,10 +59,65 @@ class ResponseFormat(BaseModel): type: str = Literal["text", "json_object"] +class ChatCompletionSystemMessage(BaseModel): + role: Literal["system"] + content: str + name: Optional[str] = None + + +class ChatCompletionTextContentPart(BaseModel): + type: Literal['text'] + text: str + + +class ChatCompletionImageURL(BaseModel): + url: str + detail: Literal["low", "high", "auto"] = "auto" + + +class ChatCompletionImageContentPart(BaseModel): + type: Literal['image_url'] + image_url: ChatCompletionImageURL + + +class ChatCompletionUserMessage(BaseModel): + role: Literal["user"] + content: Union[str, List[Union[ChatCompletionTextContentPart, + ChatCompletionImageContentPart]]] + name: Optional[str] = None + + +class ChatCompletionAssistantFunctionTool(BaseModel): + name: str + arguments: str + + +class ChatCompletionAssistantFunctionToolCall(BaseModel): + id: str + type: Literal["function"] + function: ChatCompletionAssistantFunctionTool + + +class ChatCompletionAssistantMessage(BaseModel): + role: Literal["assistant"] + content: Optional[str] = None + name: Optional[str] = None + tool_calls: Optional[List[ChatCompletionAssistantFunctionToolCall]] = None + + +class ChatCompletionToolMessage(BaseModel): + role: Literal["tool"] + content: str + tool_call_id: str + + class ChatCompletionRequest(BaseModel): # Ordered by official 
OpenAI API documentation # https://platform.openai.com/docs/api-reference/chat/create - messages: List[Dict[str, str]] + messages: List[Union[ChatCompletionSystemMessage, + ChatCompletionUserMessage, + ChatCompletionAssistantMessage, + ChatCompletionToolMessage]] model: str frequency_penalty: Optional[float] = 0.0 logit_bias: Optional[Dict[str, float]] = None diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 0980c3d3cb61..a48802c3dd59 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -1,21 +1,26 @@ import codecs import time -from typing import AsyncGenerator, AsyncIterator, List, Optional, Union +from typing import AsyncGenerator, AsyncIterator, List, Optional, Tuple, Union +import numpy as np +import torch from fastapi import Request +from vllm.config import VisionLanguageConfig from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.entrypoints.openai.protocol import ( - ChatCompletionRequest, ChatCompletionResponse, - ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, - ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse, + ChatCompletionImageContentPart, ChatCompletionRequest, + ChatCompletionResponse, ChatCompletionResponseChoice, + ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, + ChatCompletionTextContentPart, ChatMessage, DeltaMessage, ErrorResponse, UsageInfo) from vllm.entrypoints.openai.serving_engine import LoRA, OpenAIServing from vllm.logger import init_logger from vllm.model_executor.guided_decoding import ( get_guided_decoding_logits_processor) from vllm.outputs import RequestOutput -from vllm.utils import random_uuid +from vllm.sequence import MultiModalData +from vllm.utils import get_image_async, random_uuid logger = init_logger(__name__) @@ -34,6 +39,79 @@ def __init__(self, self.response_role = response_role self._load_chat_template(chat_template) + async def parse_chat_message_image_input( + self, + content: List[Union[ChatCompletionTextContentPart, + ChatCompletionImageContentPart]], + ) -> Tuple[List[str], List[MultiModalData]]: + """Parse image input according to OpenAI's API.""" + vlm_config = getattr(self.engine.engine, "vision_language_config", + None) + assert isinstance(vlm_config, VisionLanguageConfig), ( + "Provide `image_input_type` and other vision " + "related configurations through LLM entrypoint " + "or engine arguments.") + + if len(vlm_config.image_input_shape) == 3: + raise ValueError( + "The model is configured to accept image features rather than " + "pixel values, and thus does not support image inputs") + + batch_size, num_channels, height, width = vlm_config.image_input_shape + feature_size = vlm_config.image_feature_size + + if num_channels == 1: + image_format = "L" + elif num_channels == 3: + image_format = "RGB" + elif num_channels == 4: + image_format = "RGBA" + else: + msg = f"Unsupported number of channels ({num_channels})" + raise NotImplementedError(msg) + + content_texts: List[str] = [] + content_images: List[MultiModalData] = [] + + for i, part in enumerate(content): + if isinstance(part, ChatCompletionTextContentPart): + content_texts.append(part.text) + + if isinstance(part, ChatCompletionImageContentPart): + with await get_image_async(part.image_url.url) as image: + image = image.convert(image_format).resize((height, width)) + image_arr = np.array(image, copy=True) + + image_tensor = torch.as_tensor(image_arr) \ + .view(batch_size, height, width, num_channels) \ + 
.permute((0, 3, 1, 2)) \ + .to(torch.float16) + + content_texts.append("" * feature_size) + content_images.append( + MultiModalData( + type=MultiModalData.Type.IMAGE, + data=image_tensor, + )) + + if part.image_url.detail != "auto": + logger.info("content[%s].image_url.detail is ignored", i) + + return content_texts, content_images + + async def parse_chat_message_content( + self, + content: Optional[Union[str, + List[Union[ChatCompletionTextContentPart, + ChatCompletionImageContentPart]]]], + ) -> Tuple[List[str], List[MultiModalData]]: + if content is None: + return [], [] + if isinstance(content, str): + return [content], [] + + return await self.parse_chat_message_image_input(content) + async def create_chat_completion( self, request: ChatCompletionRequest, raw_request: Request ) -> Union[ErrorResponse, AsyncGenerator[str, None], @@ -52,10 +130,33 @@ async def create_chat_completion( return error_check_ret try: + conversation: List[ChatMessage] = [] + multi_modal_datas: List[MultiModalData] = [] + + for m in request.messages: + text, images = await self.parse_chat_message_content(m.content) + + conversation.append( + ChatMessage( + role=m.role, + content="\n".join(text), + )) + multi_modal_datas.extend(images) + + if len(multi_modal_datas) == 0: + multi_modal_data = None + elif len(multi_modal_datas) == 1: + multi_modal_data, = multi_modal_datas + else: + raise NotImplementedError("Multiple image input not supported") + prompt = self.tokenizer.apply_chat_template( - conversation=request.messages, + conversation=[ + message.model_dump() for message in conversation + ], tokenize=False, - add_generation_prompt=request.add_generation_prompt) + add_generation_prompt=request.add_generation_prompt, + ) except Exception as e: logger.error( f"Error in applying chat template from request: {str(e)}") @@ -78,17 +179,32 @@ async def create_chat_completion( except ValueError as e: return self.create_error_response(str(e)) - result_generator = self.engine.generate(prompt, sampling_params, - request_id, token_ids, - lora_request) + result_generator = self.engine.generate( + prompt, + sampling_params, + request_id, + token_ids, + lora_request=lora_request, + multi_modal_data=multi_modal_data, + ) + # Streaming response if request.stream: return self.chat_completion_stream_generator( - request, result_generator, request_id) + request, + conversation, + result_generator, + request_id, + ) else: try: return await self.chat_completion_full_generator( - request, raw_request, result_generator, request_id) + request, + raw_request, + conversation, + result_generator, + request_id, + ) except ValueError as e: # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) @@ -97,11 +213,14 @@ def get_chat_request_role(self, request: ChatCompletionRequest) -> str: if request.add_generation_prompt: return self.response_role else: - return request.messages[-1]["role"] + return request.messages[-1].role async def chat_completion_stream_generator( - self, request: ChatCompletionRequest, - result_generator: AsyncIterator[RequestOutput], request_id: str + self, + request: ChatCompletionRequest, + parsed_conversation: List[ChatMessage], + result_generator: AsyncIterator[RequestOutput], + request_id: str, ) -> Union[ErrorResponse, AsyncGenerator[str, None]]: model_name = request.model @@ -142,12 +261,10 @@ async def chat_completion_stream_generator( # last message if request.echo: last_msg_content = "" - if request.messages and isinstance( - request.messages, - list) and request.messages[-1].get( 
- "content") and request.messages[-1].get( - "role") == role: - last_msg_content = request.messages[-1]["content"] + if (parsed_conversation + and parsed_conversation[-1].content + and parsed_conversation[-1].role == role): + last_msg_content = parsed_conversation[-1].content if last_msg_content: for i in range(request.n): @@ -242,9 +359,13 @@ async def chat_completion_stream_generator( yield "data: [DONE]\n\n" async def chat_completion_full_generator( - self, request: ChatCompletionRequest, raw_request: Request, - result_generator: AsyncIterator[RequestOutput], - request_id: str) -> Union[ErrorResponse, ChatCompletionResponse]: + self, + request: ChatCompletionRequest, + raw_request: Request, + parsed_conversation: List[ChatMessage], + result_generator: AsyncIterator[RequestOutput], + request_id: str, + ) -> Union[ErrorResponse, ChatCompletionResponse]: model_name = request.model created_time = int(time.time()) @@ -258,7 +379,7 @@ async def chat_completion_full_generator( final_res = res assert final_res is not None - choices = [] + choices: List[ChatCompletionResponseChoice] = [] role = self.get_chat_request_role(request) for output in final_res.outputs: @@ -285,11 +406,9 @@ async def chat_completion_full_generator( if request.echo: last_msg_content = "" - if request.messages and isinstance( - request.messages, list) and request.messages[-1].get( - "content") and request.messages[-1].get( - "role") == role: - last_msg_content = request.messages[-1]["content"] + if (parsed_conversation and parsed_conversation[-1].content + and parsed_conversation[-1].role == role): + last_msg_content = parsed_conversation[-1].content for choice in choices: full_message = last_msg_content + choice.message.content diff --git a/vllm/utils.py b/vllm/utils.py index 380ffe76fea7..f3d18e560e5e 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -11,10 +11,14 @@ from platform import uname from typing import (Any, Awaitable, Callable, Generic, Hashable, List, Optional, Tuple, TypeVar, Union) +from urllib.parse import urlparse +from urllib.request import urlopen import psutil +import requests import torch from packaging.version import Version, parse +from PIL import Image from vllm.logger import init_logger @@ -213,6 +217,29 @@ def get_ip() -> str: return "0.0.0.0" +def get_image(url: str) -> Image.Image: + """Retrieves an image from a data URL or an online resource. + + The returned image should be used like a context manager to ensure + proper disposal of the underlying buffer. 
+ """ + # Avoid circular import + from vllm import __version__ as VLLM_VERSION + + url_components = urlparse(url) + if url_components.scheme == 'data': + return Image.open(urlopen(url)) + + headers = {"User-Agent": f"vLLM/{VLLM_VERSION}"} + response = requests.get(url, headers=headers, stream=True) + response.raise_for_status() + + return Image.open(response.raw) + + +get_image_async = make_async(get_image) + + def get_distributed_init_method(ip: str, port: int) -> str: # Brackets are not permitted in ipv4 addresses, # see https://github.com/python/cpython/issues/103848 From 607434e93b07babf7ab1c6221d9ccb414da73fd4 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 9 Apr 2024 09:26:32 +0000 Subject: [PATCH 02/45] Update documentation - Add general guide for using VLMs - Add LLavA to list of supported models --- README.md | 1 + docs/source/index.rst | 1 + docs/source/models/supported_models.rst | 18 ++++ docs/source/models/vlm.rst | 107 ++++++++++++++++++++++++ 4 files changed, 127 insertions(+) create mode 100644 docs/source/models/vlm.rst diff --git a/README.md b/README.md index 2a070b9e2064..ba1fb9a1edeb 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ vLLM seamlessly supports many Hugging Face models, including the following archi - InternLM2 (`internlm/internlm2-7b`, `internlm/internlm2-chat-7b`, etc.) - Jais (`core42/jais-13b`, `core42/jais-13b-chat`, `core42/jais-30b-v3`, `core42/jais-30b-chat-v3`, etc.) - LLaMA & LLaMA-2 (`meta-llama/Llama-2-70b-hf`, `lmsys/vicuna-13b-v1.3`, `young-geng/koala`, `openlm-research/open_llama_13b`, etc.) +- LLavA-1.5 (`llava-hf/llava-1.5-7b-hf`, `llava-hf/llava-1.5-13b-hf`, etc.) - Mistral (`mistralai/Mistral-7B-v0.1`, `mistralai/Mistral-7B-Instruct-v0.1`, etc.) - Mixtral (`mistralai/Mixtral-8x7B-v0.1`, `mistralai/Mixtral-8x7B-Instruct-v0.1`, etc.) - MPT (`mosaicml/mpt-7b`, `mosaicml/mpt-30b`, etc.) diff --git a/docs/source/index.rst b/docs/source/index.rst index 5d5d52696ba3..b2745f019ba6 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -85,6 +85,7 @@ Documentation models/adding_model models/engine_args models/lora + models/vlm .. toctree:: :maxdepth: 1 diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index 9c2f5ba458eb..93241b3864c7 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -83,6 +83,24 @@ Alongside each architecture, we include some popular models that use it. - LLaMA, LLaMA-2, Vicuna, Alpaca, Yi - :code:`meta-llama/Llama-2-13b-hf`, :code:`meta-llama/Llama-2-70b-hf`, :code:`openlm-research/open_llama_13b`, :code:`lmsys/vicuna-13b-v1.3`, :code:`01-ai/Yi-6B`, :code:`01-ai/Yi-34B`, etc. - ✅︎ + * - :code:`LlavaForConditionalGeneration` + - LLaVA-1.5 + - :code:`llava-hf/llava-1.5-7b-hf`\*, :code:`llava-hf/llava-1.5-13b-hf`\*, etc. + + .. note:: + + Models with an asterisk (\*) are missing :code:`chat_template` from HuggingFace :code:`config.json`. A predefined template can be found in our repo (:code:`examples/template_llava.jinja`). To host the OpenAI-compatible server, provide the chat template via command-line arguments. You also need to provide the :code:`VisionLanguageConfig` to initialize the model. See the following example: + + .. 
code-block:: shell
+
+             $ python -m vllm.entrypoints.openai.api_server \
+                 --model llava-hf/llava-1.5-7b-hf \
+                 --chat-template examples/template_llava.jinja \
+                 --image-input-type pixel_values \
+                 --image-token-id 32000 \
+                 --image-input-shape 1,3,336,336 \
+                 --image-feature-size 576
+
+     -
   * - :code:`MistralForCausalLM`
     - Mistral, Mistral-Instruct
     - :code:`mistralai/Mistral-7B-v0.1`, :code:`mistralai/Mistral-7B-Instruct-v0.1`, etc.
diff --git a/docs/source/models/vlm.rst b/docs/source/models/vlm.rst
new file mode 100644
index 000000000000..e304bb930ad3
--- /dev/null
+++ b/docs/source/models/vlm.rst
@@ -0,0 +1,107 @@
+.. _vlm:
+
+Using VLMs
+==========
+
+This document shows you how to run and serve Vision Language Models (VLMs) using vLLM.
+
+Additional Engine Arguments
+---------------------------
+
+Apart from the :ref:`basic engine arguments <engine_args>`, VLMs additionally require the following engine arguments for vLLM.
+
+.. option:: --image-input-type {pixel_values,image_features}
+
+   The image input type passed into vLLM. Should be one of "pixel_values" or "image_features".
+
+.. option:: --image-token-id
+
+   Input ID for image token.
+
+.. option:: --image-input-shape
+
+   The biggest image input shape (worst for memory footprint) given an input type. Only used for vLLM's profile_run.
+
+   For example, if the image tensor has shape :code:`(1, 3, 336, 336)`, then you should pass :code:`--image-input-shape 1,3,336,336`.
+
+.. option:: --image-feature-size
+
+   The image feature size along the context dimension.
+
+Offline Batched Inference
+-------------------------
+
+To initialize a VLM, the aforementioned arguments must be passed to the ``LLM`` class for instantiating the engine.
+
+.. code-block:: python
+
+   llm = LLM(
+       model="llava-hf/llava-1.5-7b-hf",
+       image_input_type="pixel_values",
+       image_token_id=32000,
+       image_input_shape="1,3,336,336",
+       image_feature_size=576,
+   )
+
+For now, we only support a single image per text prompt when calling ``llm.generate``. To pass an image to the model, note the following parameters:
+
+* ``prompt``: The prompt should have a number of ``<image>`` tokens equal to ``image_feature_size``.
+* ``multi_modal_data``: This should be an instance of ``MultiModalData`` with type ``MultiModalData.Type.IMAGE`` and its data set to a single image tensor with the shape ``image_input_shape``.
+
+.. code-block:: python
+
+   prompt = "<image>" * 576 + (
+       "\nUSER: What is the content of this image?\nASSISTANT:")
+
+   # Load the image and reshape to (1, 3, 336, 336)
+   image = ...
+
+   outputs = llm.generate(prompt,
+                          multi_modal_data=MultiModalData(
+                              type=MultiModalData.Type.IMAGE, data=image))
+
+   for o in outputs:
+       generated_text = o.outputs[0].text
+       print(generated_text)
+
+A code example can be found in `examples/llava_example.py <https://github.com/vllm-project/vllm/blob/main/examples/llava_example.py>`_.
+
+OpenAI-Compatible Server
+------------------------
+
+We support image inputs to the OpenAI Chat API, as described in `GPT-4 with Vision <https://platform.openai.com/docs/guides/vision>`_.
+
+Here is a simple example using the :code:`openai` package:
+
+.. 
code-block:: python + + from openai import OpenAI + + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" + + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + + chat_response = client.chat.completions.create( + model="llava-hf/llava-1.5-7b-hf", + messages=[{ + "role": "user", + "content": [ + {"type": "text", "text": "what's in this image?"}, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + }, + ], + }], + ) + print("Chat response:", chat_response) + +.. note:: + + For now, we only support a single image per API call. Also, the ``detail`` parameter is ignored since it may not be applicable to other models. From aaa6bfe880ff750d9dc65ec3d1a2afc1f0364328 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 9 Apr 2024 11:15:42 +0000 Subject: [PATCH 03/45] Add tests for OpenAI image input API and image loader - Move `ServerRunner` to common file --- .buildkite/test-pipeline.yaml | 3 + tests/entrypoints/test_openai_server.py | 48 +---- .../entrypoints/test_openai_server_vision.py | 173 ++++++++++++++++++ tests/test_utils.py | 70 +++++++ vllm/entrypoints/openai/test_utils.py | 49 +++++ 5 files changed, 297 insertions(+), 46 deletions(-) create mode 100644 tests/entrypoints/test_openai_server_vision.py create mode 100644 tests/test_utils.py create mode 100644 vllm/entrypoints/openai/test_utils.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 27e44463a30a..419252d03e47 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -73,6 +73,9 @@ steps: - label: LogitsProcessor Test command: pytest -v -s test_logits_processor.py +- label: Utils Test + command: pytest -v -s test_utils.py + - label: Worker Test command: pytest -v -s worker diff --git a/tests/entrypoints/test_openai_server.py b/tests/entrypoints/test_openai_server.py index 442f8bdf3b4b..60aa1369673d 100644 --- a/tests/entrypoints/test_openai_server.py +++ b/tests/entrypoints/test_openai_server.py @@ -1,10 +1,6 @@ # imports for guided decoding tests import json -import os import re -import subprocess -import sys -import time import jsonschema import openai # use the official client for correctness check @@ -12,13 +8,12 @@ # using Ray for overall ease of process management, parallel requests, # and debugging. 
import ray -import requests # downloading lora to test lora requests from huggingface_hub import snapshot_download +from vllm.entrypoints.openai.test_utils import ServerRunner from vllm.transformers_utils.tokenizer import get_tokenizer -MAX_SERVER_START_WAIT_S = 600 # wait for server to start for 60 seconds # any model with a chat template should work here MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta" # technically this needs Mistral-7B-v0.1 as base, but we're not testing @@ -75,51 +70,12 @@ pytestmark = pytest.mark.asyncio -@ray.remote(num_gpus=1) -class ServerRunner: - - def __init__(self, args): - env = os.environ.copy() - env["PYTHONUNBUFFERED"] = "1" - self.proc = subprocess.Popen( - ["python3", "-m", "vllm.entrypoints.openai.api_server"] + args, - env=env, - stdout=sys.stdout, - stderr=sys.stderr, - ) - self._wait_for_server() - - def ready(self): - return True - - def _wait_for_server(self): - # run health check - start = time.time() - while True: - try: - if requests.get( - "http://localhost:8000/health").status_code == 200: - break - except Exception as err: - if self.proc.poll() is not None: - raise RuntimeError("Server exited unexpectedly.") from err - - time.sleep(0.5) - if time.time() - start > MAX_SERVER_START_WAIT_S: - raise RuntimeError( - "Server failed to start in time.") from err - - def __del__(self): - if hasattr(self, "proc"): - self.proc.terminate() - - @pytest.fixture(scope="session") def zephyr_lora_files(): return snapshot_download(repo_id=LORA_NAME) -@pytest.fixture(scope="session") +@pytest.fixture(scope="module") def server(zephyr_lora_files): ray.init() server_runner = ServerRunner.remote([ diff --git a/tests/entrypoints/test_openai_server_vision.py b/tests/entrypoints/test_openai_server_vision.py new file mode 100644 index 000000000000..d804cf5ba40a --- /dev/null +++ b/tests/entrypoints/test_openai_server_vision.py @@ -0,0 +1,173 @@ +from pathlib import Path + +import openai # use the official client for correctness check +import pytest +# using Ray for overall ease of process management, parallel requests, +# and debugging. 
+import ray + +from vllm.entrypoints.openai.test_utils import ServerRunner + +MODEL_NAME = "llava-hf/llava-1.5-7b-hf" +CHAT_TEMPLATE = (Path(__file__).parent.parent.parent / + "examples/template_llava.jinja") +assert CHAT_TEMPLATE.exists() + +# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA) +TEST_IMAGE_URLS = [ + "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png", + "https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Venn_diagram_rgb.svg/1280px-Venn_diagram_rgb.svg.png", + "https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png", +] + +pytestmark = pytest.mark.asyncio + + +@pytest.fixture(scope="module") +def server(): + ray.init() + server_runner = ServerRunner.remote([ + "--model", + MODEL_NAME, + # use half precision for speed and memory savings in CI environment + "--dtype", + "bfloat16", + "--max-model-len", + "4096", + "--enforce-eager", + # vision language config below + "--image-input-type", + "pixel_values", + "--image-token-id", + "32000", + "--image-input-shape", + "1,3,336,336", + "--image-feature-size", + "576", + # chat template required for LLaVA + "--chat-template", + str(CHAT_TEMPLATE), + ]) + ray.get(server_runner.ready.remote()) + yield server_runner + ray.shutdown() + + +@pytest.fixture(scope="session") +def client(): + client = openai.AsyncOpenAI( + base_url="http://localhost:8000/v1", + api_key="token-abc123", + ) + yield client + + +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +@pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) +async def test_single_chat_session_image(server, client: openai.AsyncOpenAI, + model_name: str, image_url: str): + messages = [{ + "role": + "user", + "content": [ + { + "type": "text", + "text": "What's in this image? Describe in detail." + }, + { + "type": "image_url", + "image_url": { + "url": image_url + } + }, + ], + }] + + # test single completion + chat_completion = await client.chat.completions.create(model=model_name, + messages=messages, + max_tokens=10, + logprobs=True, + top_logprobs=5) + assert chat_completion.id is not None + assert chat_completion.choices is not None and len( + chat_completion.choices) == 1 + assert chat_completion.choices[0].message is not None + assert chat_completion.choices[0].logprobs is not None + assert chat_completion.choices[0].logprobs.top_logprobs is not None + assert len(chat_completion.choices[0].logprobs.top_logprobs[0]) == 5 + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 10 + assert message.role == "assistant" + messages.append({"role": "assistant", "content": message.content}) + + # test multi-turn dialogue + messages.append({"role": "user", "content": "express your result in json"}) + chat_completion = await client.chat.completions.create( + model=model_name, + messages=messages, + max_tokens=10, + ) + message = chat_completion.choices[0].message + assert message.content is not None and len(message.content) >= 0 + + +@pytest.mark.parametrize("model_name", [MODEL_NAME]) +@pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) +async def test_chat_streaming_image(server, client: openai.AsyncOpenAI, + model_name: str, image_url: str): + messages = [{ + "role": + "user", + "content": [ + { + "type": "text", + "text": "What's in this image? Describe in detail." 
+ }, + { + "type": "image_url", + "image_url": { + "url": image_url + } + }, + ], + }] + + # test single completion + chat_completion = await client.chat.completions.create( + model=model_name, + messages=messages, + max_tokens=10, + temperature=0.0, + ) + output = chat_completion.choices[0].message.content + stop_reason = chat_completion.choices[0].finish_reason + + # test streaming + stream = await client.chat.completions.create( + model=model_name, + messages=messages, + max_tokens=10, + temperature=0.0, + stream=True, + ) + chunks = [] + finish_reason_count = 0 + async for chunk in stream: + delta = chunk.choices[0].delta + if delta.role: + assert delta.role == "assistant" + if delta.content: + chunks.append(delta.content) + if chunk.choices[0].finish_reason is not None: + finish_reason_count += 1 + # finish reason should only return in last block + assert finish_reason_count == 1 + assert chunk.choices[0].finish_reason == stop_reason + assert delta.content + assert "".join(chunks) == output + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 000000000000..affcb93944d3 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,70 @@ +import base64 +import mimetypes +from tempfile import NamedTemporaryFile +from typing import Dict, Tuple + +import numpy as np +import pytest +from PIL import Image + +from vllm.utils import get_image + +# Test different image extensions (JPG/PNG) and formats (gray/RGB/RGBA) +TEST_IMAGE_URLS = [ + "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + "https://upload.wikimedia.org/wikipedia/commons/f/fa/Grayscale_8bits_palette_sample_image.png", + "https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Venn_diagram_rgb.svg/1280px-Venn_diagram_rgb.svg.png", + "https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png", +] + + +@pytest.fixture(scope="session") +def url_images() -> Dict[str, Image.Image]: + return {image_url: get_image(image_url) for image_url in TEST_IMAGE_URLS} + + +def get_supported_suffixes() -> Tuple[str, ...]: + # We should at least test the file types mentioned in GPT-4 with Vision + OPENAI_SUPPORTED_SUFFIXES = ('.png', '.jpeg', '.jpg', '.webp', '.gif') + + # Additional file types that are supported by us + EXTRA_SUPPORTED_SUFFIXES = ('.bmp', '.tiff') + + return OPENAI_SUPPORTED_SUFFIXES + EXTRA_SUPPORTED_SUFFIXES + + +def _image_equals(a: Image.Image, b: Image.Image) -> bool: + return (np.asarray(a) == np.asarray(b.convert(a.mode))).all() + + +@pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) +@pytest.mark.parametrize("suffix", get_supported_suffixes()) +def test_get_image_base64(url_images: Dict[str, Image.Image], image_url: str, + suffix: str): + url_image = url_images[image_url] + + try: + mime_type = Image.MIME[Image.registered_extensions()[suffix]] + except KeyError: + try: + mime_type = mimetypes.types_map[suffix] + except KeyError: + pytest.skip('No MIME type') + + with NamedTemporaryFile(suffix=suffix) as f: + try: + url_image.save(f.name) + except Exception as e: + if e.args[0] == 'cannot write mode RGBA as JPEG': + pytest.skip('Conversion not supported') + + raise + + base64_image = base64.b64encode(f.read()).decode("utf-8") + data_url = f"data:{mime_type};base64,{base64_image}" + + with get_image(data_url) as data_image: + if _image_equals(url_image, Image.open(f)): + assert _image_equals(url_image, data_image) + else: + 
pass # Lossy format; only check that image can be opened diff --git a/vllm/entrypoints/openai/test_utils.py b/vllm/entrypoints/openai/test_utils.py new file mode 100644 index 000000000000..37b1698b9212 --- /dev/null +++ b/vllm/entrypoints/openai/test_utils.py @@ -0,0 +1,49 @@ +import os +import subprocess +import sys +import time + +# using Ray for overall ease of process management, parallel requests, +# and debugging. +import ray +import requests + +MAX_SERVER_START_WAIT_S = 600 # wait for server to start for 60 seconds + +@ray.remote(num_gpus=1) +class ServerRunner: + + def __init__(self, args): + env = os.environ.copy() + env["PYTHONUNBUFFERED"] = "1" + self.proc = subprocess.Popen( + ["python3", "-m", "vllm.entrypoints.openai.api_server"] + args, + env=env, + stdout=sys.stdout, + stderr=sys.stderr, + ) + self._wait_for_server() + + def ready(self): + return True + + def _wait_for_server(self): + # run health check + start = time.time() + while True: + try: + if requests.get( + "http://localhost:8000/health").status_code == 200: + break + except Exception as err: + if self.proc.poll() is not None: + raise RuntimeError("Server exited unexpectedly.") from err + + time.sleep(0.5) + if time.time() - start > MAX_SERVER_START_WAIT_S: + raise RuntimeError( + "Server failed to start in time.") from err + + def __del__(self): + if hasattr(self, "proc"): + self.proc.terminate() From 44829b5b3ae92cae0259300ee2990faa948b56da Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 11 Apr 2024 03:36:06 +0000 Subject: [PATCH 04/45] Apply formatter --- vllm/entrypoints/openai/test_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/entrypoints/openai/test_utils.py b/vllm/entrypoints/openai/test_utils.py index 37b1698b9212..058efd4cfcbe 100644 --- a/vllm/entrypoints/openai/test_utils.py +++ b/vllm/entrypoints/openai/test_utils.py @@ -10,6 +10,7 @@ MAX_SERVER_START_WAIT_S = 600 # wait for server to start for 60 seconds + @ray.remote(num_gpus=1) class ServerRunner: From bccb3678ca264624767bcafdf5bd6b464f312803 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 11 Apr 2024 08:16:50 +0000 Subject: [PATCH 05/45] Place image before text for `llava-hf` model --- docs/source/models/vlm.rst | 3 ++- tests/entrypoints/test_openai_server_vision.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/source/models/vlm.rst b/docs/source/models/vlm.rst index e304bb930ad3..fcf0d1da3bbc 100644 --- a/docs/source/models/vlm.rst +++ b/docs/source/models/vlm.rst @@ -85,18 +85,19 @@ Here is a simple example using the :code:`openai` package: base_url=openai_api_base, ) + # Note that this model expects the image to come before the main text chat_response = client.chat.completions.create( model="llava-hf/llava-1.5-7b-hf", messages=[{ "role": "user", "content": [ - {"type": "text", "text": "what's in this image?"}, { "type": "image_url", "image_url": { "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", }, }, + {"type": "text", "text": "What's in this image?"}, ], }], ) diff --git a/tests/entrypoints/test_openai_server_vision.py b/tests/entrypoints/test_openai_server_vision.py index d804cf5ba40a..f50537bd41a7 100644 --- a/tests/entrypoints/test_openai_server_vision.py +++ b/tests/entrypoints/test_openai_server_vision.py @@ -71,16 +71,16 @@ async def test_single_chat_session_image(server, client: openai.AsyncOpenAI, "role": "user", "content": [ - { - "type": 
"text", - "text": "What's in this image? Describe in detail." - }, { "type": "image_url", "image_url": { "url": image_url } }, + { + "type": "text", + "text": "What's in this image?" + }, ], }] @@ -121,16 +121,16 @@ async def test_chat_streaming_image(server, client: openai.AsyncOpenAI, "role": "user", "content": [ - { - "type": "text", - "text": "What's in this image? Describe in detail." - }, { "type": "image_url", "image_url": { "url": image_url } }, + { + "type": "text", + "text": "What's in this image?" + }, ], }] From b9302e88ee93e02ce4f392f42696ab85c97a342e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 11 Apr 2024 08:17:46 +0000 Subject: [PATCH 06/45] Internally enable customization of merging image with text prompt --- vllm/config.py | 55 ++++++++++- vllm/entrypoints/openai/serving_chat.py | 107 ++++++++++++---------- vllm/entrypoints/openai/serving_engine.py | 6 +- 3 files changed, 111 insertions(+), 57 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 4102edbe01d3..73847f10ce3e 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2,11 +2,11 @@ import json import os from dataclasses import dataclass, fields -from typing import TYPE_CHECKING, ClassVar, Optional, Union +from typing import TYPE_CHECKING, ClassVar, Optional, Protocol, Union import torch from packaging.version import Version -from transformers import PretrainedConfig +from transformers import PretrainedConfig, PreTrainedTokenizerBase from vllm.logger import init_logger from vllm.transformers_utils.config import get_config, get_hf_text_config @@ -834,6 +834,44 @@ def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig): "LoRA is enabled.") +class OpenAIVisionAdapter(Protocol): + + def get_image_token_text(self, config: "VisionLanguageConfig", + tokenizer: PreTrainedTokenizerBase, + image_idx: int) -> str: + """Defines how to represent an image in the text prompt.""" + ... + + +class OpenAIVisionAdapterForNoImage(OpenAIVisionAdapter): + + def get_image_token_text(self, config: "VisionLanguageConfig", + tokenizer: PreTrainedTokenizerBase, + image_idx: int) -> str: + raise NotImplementedError("Image input not supported") + + +class OpenAIVisionAdapterForSingleImage(OpenAIVisionAdapter): + + def get_image_token_text(self, config: "VisionLanguageConfig", + tokenizer: PreTrainedTokenizerBase, + image_idx: int) -> str: + if image_idx > 0: + raise NotImplementedError("Multiple image input not supported") + + image_token_str = tokenizer.decode(config.image_token_id) + return image_token_str * config.image_feature_size + + +class OpenAIVisionAdapterForMultiImage(OpenAIVisionAdapter): + + def get_image_token_text(self, config: "VisionLanguageConfig", + tokenizer: PreTrainedTokenizerBase, + image_idx: int) -> str: + image_token_str = tokenizer.decode(config.image_token_id + image_idx) + return image_token_str * config.image_feature_size + + @dataclass class VisionLanguageConfig: """Configs the input data format and how models should run for @@ -855,6 +893,14 @@ class ImageInputType(enum.Enum): PIXEL_VALUES = enum.auto() IMAGE_FEATURES = enum.auto() + class OpenAIVisionAPI(enum.Enum): + """Specifies how the model supports + `OpenAI's GPT-4 with Vision API `_. + """ + UNSUPPORTED = OpenAIVisionAdapterForNoImage() + SINGLE_IMAGE = OpenAIVisionAdapterForSingleImage() + MULTI_IMAGE = OpenAIVisionAdapterForMultiImage() + image_input_type: ImageInputType # The input id corresponding to image token. 
image_token_id: int @@ -864,9 +910,10 @@ class ImageInputType(enum.Enum): image_input_shape: tuple image_feature_size: int + openai_api: OpenAIVisionAPI = OpenAIVisionAPI.SINGLE_IMAGE + @classmethod - def get_image_input_enum_type( - cls, value: str) -> "VisionLanguageConfig.ImageInputType": + def get_image_input_enum_type(cls, value: str) -> ImageInputType: """Get the image input type from a string.""" try: return cls.ImageInputType[value.upper()] diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index a48802c3dd59..d804284f38b8 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -5,6 +5,8 @@ import numpy as np import torch from fastapi import Request +from PIL import Image +from transformers import PreTrainedTokenizerBase from vllm.config import VisionLanguageConfig from vllm.engine.async_llm_engine import AsyncLLMEngine @@ -25,6 +27,46 @@ logger = init_logger(__name__) +def parse_image( + config: VisionLanguageConfig, + tokenizer: PreTrainedTokenizerBase, + image: Image.Image, + image_idx: int, +) -> Tuple[str, MultiModalData]: + text = config.openai_api.value \ + .get_image_token_text(config, tokenizer, image_idx) + + if len(config.image_input_shape) == 3: + raise ValueError( + "The model is configured to accept image features rather than " + "pixel values, and thus does not support image inputs") + + batch_size, num_channels, height, width = config.image_input_shape + + if num_channels == 1: + image_format = "L" + elif num_channels == 3: + image_format = "RGB" + elif num_channels == 4: + image_format = "RGBA" + else: + msg = f"Unsupported number of channels ({num_channels})" + raise NotImplementedError(msg) + + with image: + image = image.convert(image_format).resize((height, width)) + image_arr = np.array(image, copy=True) + + image_tensor = torch.as_tensor(image_arr) \ + .view(batch_size, height, width, num_channels) \ + .permute((0, 3, 1, 2)) \ + .to(torch.float16) + + data = MultiModalData(type=MultiModalData.Type.IMAGE, data=image_tensor) + + return text, data + + class OpenAIServingChat(OpenAIServing): def __init__(self, @@ -44,31 +86,12 @@ async def parse_chat_message_image_input( content: List[Union[ChatCompletionTextContentPart, ChatCompletionImageContentPart]], ) -> Tuple[List[str], List[MultiModalData]]: - """Parse image input according to OpenAI's API.""" - vlm_config = getattr(self.engine.engine, "vision_language_config", - None) - assert isinstance(vlm_config, VisionLanguageConfig), ( - "Provide `image_input_type` and other vision " - "related configurations through LLM entrypoint " - "or engine arguments.") - - if len(vlm_config.image_input_shape) == 3: - raise ValueError( - "The model is configured to accept image features rather than " - "pixel values, and thus does not support image inputs") - - batch_size, num_channels, height, width = vlm_config.image_input_shape - feature_size = vlm_config.image_feature_size - - if num_channels == 1: - image_format = "L" - elif num_channels == 3: - image_format = "RGB" - elif num_channels == 4: - image_format = "RGBA" - else: - msg = f"Unsupported number of channels ({num_channels})" - raise NotImplementedError(msg) + config = getattr(self.engine.engine, "vision_language_config", None) + if not isinstance(config, VisionLanguageConfig): + raise ValueError("GPT-4 with Vision API is only supported for " + "vision language models.") + + tokenizer = self.tokenizer content_texts: List[str] = [] content_images: List[MultiModalData] = [] @@ 
-78,21 +101,12 @@ async def parse_chat_message_image_input( content_texts.append(part.text) if isinstance(part, ChatCompletionImageContentPart): - with await get_image_async(part.image_url.url) as image: - image = image.convert(image_format).resize((height, width)) - image_arr = np.array(image, copy=True) - - image_tensor = torch.as_tensor(image_arr) \ - .view(batch_size, height, width, num_channels) \ - .permute((0, 3, 1, 2)) \ - .to(torch.float16) - - content_texts.append("" * feature_size) - content_images.append( - MultiModalData( - type=MultiModalData.Type.IMAGE, - data=image_tensor, - )) + image = await get_image_async(part.image_url.url) + image_idx = len(content_images) + text, data = parse_image(config, tokenizer, image, image_idx) + + content_texts.append(text) + content_images.append(data) if part.image_url.detail != "auto": logger.info("content[%s].image_url.detail is ignored", i) @@ -135,12 +149,9 @@ async def create_chat_completion( for m in request.messages: text, images = await self.parse_chat_message_content(m.content) + cm = ChatMessage(role=m.role, content="\n".join(text)) - conversation.append( - ChatMessage( - role=m.role, - content="\n".join(text), - )) + conversation.append(cm) multi_modal_datas.extend(images) if len(multi_modal_datas) == 0: @@ -151,9 +162,7 @@ async def create_chat_completion( raise NotImplementedError("Multiple image input not supported") prompt = self.tokenizer.apply_chat_template( - conversation=[ - message.model_dump() for message in conversation - ], + conversation=[msg.model_dump() for msg in conversation], tokenize=False, add_generation_prompt=request.add_generation_prompt, ) @@ -190,7 +199,7 @@ async def create_chat_completion( # Streaming response if request.stream: - return self.chat_completion_stream_generator( + return await self.chat_completion_stream_generator( request, conversation, result_generator, diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 8f69388c0251..00352adb7ba9 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -27,10 +27,8 @@ class LoRA: class OpenAIServing: - def __init__(self, - engine: AsyncLLMEngine, - served_model: str, - lora_modules=Optional[List[LoRA]]): + def __init__(self, engine: AsyncLLMEngine, served_model: str, + lora_modules: Optional[List[LoRA]]): self.engine = engine self.served_model = served_model if lora_modules is None: From a44d7d169ca72039baa8ecc1d49cff1ac2c44253 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 11 Apr 2024 09:53:16 +0000 Subject: [PATCH 07/45] Fix errors in CI/CD - Incorrect loading of config (also rename `openai_api` to `image_openai`) - Incorrect await of stream generator --- vllm/config.py | 16 ++++++++++++--- vllm/engine/arg_utils.py | 26 ++++++++++++++++--------- vllm/entrypoints/openai/serving_chat.py | 4 ++-- 3 files changed, 32 insertions(+), 14 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 73847f10ce3e..f93b111e2ce3 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -893,8 +893,8 @@ class ImageInputType(enum.Enum): PIXEL_VALUES = enum.auto() IMAGE_FEATURES = enum.auto() - class OpenAIVisionAPI(enum.Enum): - """Specifies how the model supports + class ImageOpenAI(enum.Enum): + """Specifies how the model implements `OpenAI's GPT-4 with Vision API `_. 
""" UNSUPPORTED = OpenAIVisionAdapterForNoImage() @@ -910,7 +910,7 @@ class OpenAIVisionAPI(enum.Enum): image_input_shape: tuple image_feature_size: int - openai_api: OpenAIVisionAPI = OpenAIVisionAPI.SINGLE_IMAGE + image_openai: ImageOpenAI = ImageOpenAI.SINGLE_IMAGE @classmethod def get_image_input_enum_type(cls, value: str) -> ImageInputType: @@ -922,6 +922,16 @@ def get_image_input_enum_type(cls, value: str) -> ImageInputType: f"Expecting to choose from " f"{[x.name for x in cls.ImageInputType]}.") from e + @classmethod + def get_image_openai_enum_type(cls, value: str) -> ImageOpenAI: + """Get the GPT-4 with Vision API implementation from a string.""" + try: + return cls.ImageOpenAI[value.upper()] + except KeyError as e: + raise ValueError(f"{value} is not a valid choice. " + f"Expecting to choose from " + f"{[x.name for x in cls.ImageOpenAI]}.") from e + _STR_DTYPE_TO_TORCH_DTYPE = { "half": torch.float16, diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index daefddc01b43..80a7c7871e23 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -63,6 +63,7 @@ class EngineArgs: image_token_id: Optional[int] = None image_input_shape: Optional[str] = None image_feature_size: Optional[int] = None + image_openai: str = VisionLanguageConfig.ImageOpenAI.SINGLE_IMAGE.name scheduler_delay_factor: float = 0.0 enable_chunked_prefill: bool = False @@ -354,15 +355,14 @@ def add_cli_args( choices=["auto", "cuda", "neuron", "cpu"], help='Device type for vLLM execution.') # Related to Vision-language models such as llava - parser.add_argument( - '--image-input-type', - type=str, - default=None, - choices=[ - t.name.lower() for t in VisionLanguageConfig.ImageInputType - ], - help=('The image input type passed into vLLM. ' - 'Should be one of "pixel_values" or "image_features".')) + parser.add_argument('--image-input-type', + type=str, + default=None, + choices=[ + t.name.lower() + for t in VisionLanguageConfig.ImageInputType + ], + help=('The image input type passed into vLLM.')) parser.add_argument('--image-token-id', type=int, default=None, @@ -378,6 +378,12 @@ def add_cli_args( type=int, default=None, help=('The image feature size along the context dimension.')) + parser.add_argument( + '--image-openai', + type=str, + default=VisionLanguageConfig.ImageOpenAI.SINGLE_IMAGE.name.lower(), + choices=[t.name.lower() for t in VisionLanguageConfig.ImageOpenAI], + help=('Specifies how the model implements GPT-4 with Vision API.')) parser.add_argument( '--scheduler-delay-factor', type=float, @@ -477,6 +483,8 @@ def create_engine_config(self, ) -> EngineConfig: image_token_id=self.image_token_id, image_input_shape=str_to_int_tuple(self.image_input_shape), image_feature_size=self.image_feature_size, + image_openai=VisionLanguageConfig.get_image_openai_enum_type( + self.image_openai), ) else: vision_language_config = None diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index d804284f38b8..189807ed0c7e 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -33,7 +33,7 @@ def parse_image( image: Image.Image, image_idx: int, ) -> Tuple[str, MultiModalData]: - text = config.openai_api.value \ + text = config.image_openai.value \ .get_image_token_text(config, tokenizer, image_idx) if len(config.image_input_shape) == 3: @@ -199,7 +199,7 @@ async def create_chat_completion( # Streaming response if request.stream: - return await self.chat_completion_stream_generator( + return 
self.chat_completion_stream_generator( request, conversation, result_generator, From 44796056c3638fcee42c54845c9b60c0b451d335 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 03:26:43 +0000 Subject: [PATCH 08/45] Fix some type errors along the way --- vllm/entrypoints/openai/protocol.py | 16 +++++++-------- vllm/entrypoints/openai/serving_chat.py | 20 +++++++++---------- vllm/entrypoints/openai/serving_completion.py | 11 +++++----- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index b06d54ce94cf..4d6c5606cf93 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -394,8 +394,8 @@ class CompletionResponseChoice(BaseModel): index: int text: str logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = Field( + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = Field( default=None, description=( "The stop string or token id that caused the completion " @@ -417,8 +417,8 @@ class CompletionResponseStreamChoice(BaseModel): index: int text: str logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = Field( + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = Field( default=None, description=( "The stop string or token id that caused the completion " @@ -445,8 +445,8 @@ class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = None class ChatCompletionResponse(BaseModel): @@ -467,8 +467,8 @@ class ChatCompletionResponseStreamChoice(BaseModel): index: int delta: DeltaMessage logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = None class ChatCompletionStreamResponse(BaseModel): diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 1941bcdcbbdf..39d613fcbc99 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -231,17 +231,18 @@ async def chat_completion_stream_generator( parsed_conversation: List[ChatMessage], result_generator: AsyncIterator[RequestOutput], request_id: str, - ) -> Union[ErrorResponse, AsyncGenerator[str, None]]: - + ) -> AsyncGenerator[str, None]: model_name = request.model created_time = int(time.time()) chunk_object_type = "chat.completion.chunk" first_iteration = True # Send response for each token for each request.n (index) - previous_texts = [""] * request.n - previous_num_tokens = [0] * request.n - finish_reason_sent = [False] * request.n + num_choices = 1 if request.n is None else request.n + previous_texts = [""] * num_choices + previous_num_tokens = [0] * num_choices + finish_reason_sent = [False] * num_choices + try: async for res in result_generator: res: RequestOutput @@ -252,7 +253,7 @@ async def chat_completion_stream_generator( # Send first response for each request.n (index) with # the role role = self.get_chat_request_role(request) - for i in range(request.n): + for i in range(num_choices): choice_data = 
ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage(role=role), @@ -277,19 +278,19 @@ async def chat_completion_stream_generator( last_msg_content = parsed_conversation[-1].content if last_msg_content: - for i in range(request.n): + for i in range(num_choices): choice_data = ( ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage( content=last_msg_content), + logprobs=None, finish_reason=None)) chunk = ChatCompletionStreamResponse( id=request_id, object=chunk_object_type, created=created_time, choices=[choice_data], - logprobs=None, model=model_name) data = chunk.model_dump_json( exclude_unset=True) @@ -376,10 +377,9 @@ async def chat_completion_full_generator( result_generator: AsyncIterator[RequestOutput], request_id: str, ) -> Union[ErrorResponse, ChatCompletionResponse]: - model_name = request.model created_time = int(time.time()) - final_res: RequestOutput = None + final_res: Optional[RequestOutput] = None async for res in result_generator: if await raw_request.is_disconnected(): diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index c1f1744a118b..de91c712575f 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -180,7 +180,7 @@ async def create_completion(self, request: CompletionRequest, num_prompts=len(prompts)) # Non-streaming response - final_res_batch: RequestOutput = [None] * len(prompts) + final_res_batch: List[Optional[RequestOutput]] = [None] * len(prompts) try: async for i, res in result_generator: if await raw_request.is_disconnected(): @@ -217,9 +217,10 @@ async def completion_stream_generator( model_name: str, num_prompts: int, ) -> AsyncGenerator[str, None]: - previous_texts = [""] * request.n * num_prompts - previous_num_tokens = [0] * request.n * num_prompts - has_echoed = [False] * request.n * num_prompts + num_choices = 1 if request.n is None else request.n + previous_texts = [""] * num_choices * num_prompts + previous_num_tokens = [0] * num_choices * num_prompts + has_echoed = [False] * num_choices * num_prompts try: async for prompt_idx, res in result_generator: @@ -230,7 +231,7 @@ async def completion_stream_generator( raise StopAsyncIteration() for output in res.outputs: - i = output.index + prompt_idx * request.n + i = output.index + prompt_idx * num_choices # TODO(simon): optimize the performance by avoiding full # text O(n^2) sending. 
From 20852d9bd4f7c8e327e9512af097ea1899dbc096 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 04:23:01 +0000 Subject: [PATCH 09/45] Improve async behaviour of loading images - Also, use the type definitions from `openai` directly --- vllm/entrypoints/openai/protocol.py | 58 +-------- vllm/entrypoints/openai/serving_chat.py | 158 +++++++++++++----------- 2 files changed, 87 insertions(+), 129 deletions(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 4d6c5606cf93..8f32428ec288 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -4,6 +4,7 @@ from typing import Dict, List, Literal, Optional, Union import torch +from openai.types.chat import ChatCompletionMessageParam from pydantic import BaseModel, Field, conint, model_validator from vllm.sampling_params import SamplingParams @@ -59,65 +60,10 @@ class ResponseFormat(BaseModel): type: str = Literal["text", "json_object"] -class ChatCompletionSystemMessage(BaseModel): - role: Literal["system"] - content: str - name: Optional[str] = None - - -class ChatCompletionTextContentPart(BaseModel): - type: Literal['text'] - text: str - - -class ChatCompletionImageURL(BaseModel): - url: str - detail: Literal["low", "high", "auto"] = "auto" - - -class ChatCompletionImageContentPart(BaseModel): - type: Literal['image_url'] - image_url: ChatCompletionImageURL - - -class ChatCompletionUserMessage(BaseModel): - role: Literal["user"] - content: Union[str, List[Union[ChatCompletionTextContentPart, - ChatCompletionImageContentPart]]] - name: Optional[str] = None - - -class ChatCompletionAssistantFunctionTool(BaseModel): - name: str - arguments: str - - -class ChatCompletionAssistantFunctionToolCall(BaseModel): - id: str - type: Literal["function"] - function: ChatCompletionAssistantFunctionTool - - -class ChatCompletionAssistantMessage(BaseModel): - role: Literal["assistant"] - content: Optional[str] = None - name: Optional[str] = None - tool_calls: Optional[List[ChatCompletionAssistantFunctionToolCall]] = None - - -class ChatCompletionToolMessage(BaseModel): - role: Literal["tool"] - content: str - tool_call_id: str - - class ChatCompletionRequest(BaseModel): # Ordered by official OpenAI API documentation # https://platform.openai.com/docs/api-reference/chat/create - messages: List[Union[ChatCompletionSystemMessage, - ChatCompletionUserMessage, - ChatCompletionAssistantMessage, - ChatCompletionToolMessage]] + messages: List[ChatCompletionMessageParam] model: str frequency_penalty: Optional[float] = 0.0 logit_bias: Optional[Dict[str, float]] = None diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 39d613fcbc99..d42757651bfb 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -1,20 +1,20 @@ import codecs import time -from typing import AsyncGenerator, AsyncIterator, List, Optional, Tuple, Union +from typing import (AsyncGenerator, AsyncIterator, Awaitable, Iterable, List, + Optional, Tuple, TypedDict, Union, final) import numpy as np import torch from fastapi import Request -from PIL import Image -from transformers import PreTrainedTokenizerBase +from openai.types.chat import (ChatCompletionContentPartParam, + ChatCompletionRole) from vllm.config import VisionLanguageConfig from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.entrypoints.openai.protocol import ( - ChatCompletionImageContentPart, ChatCompletionRequest, - ChatCompletionResponse, 
ChatCompletionResponseChoice, - ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, - ChatCompletionTextContentPart, ChatMessage, DeltaMessage, ErrorResponse, + ChatCompletionRequest, ChatCompletionResponse, + ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, + ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse, UsageInfo) from vllm.entrypoints.openai.serving_engine import LoRA, OpenAIServing from vllm.logger import init_logger @@ -27,14 +27,8 @@ logger = init_logger(__name__) -def parse_image( - config: VisionLanguageConfig, - tokenizer: PreTrainedTokenizerBase, - image: Image.Image, - image_idx: int, -) -> Tuple[str, MultiModalData]: - text = config.image_openai.value \ - .get_image_token_text(config, tokenizer, image_idx) +async def get_and_parse_image(image_url: str, + config: VisionLanguageConfig) -> MultiModalData: if len(config.image_input_shape) == 3: raise ValueError( @@ -53,7 +47,7 @@ def parse_image( msg = f"Unsupported number of channels ({num_channels})" raise NotImplementedError(msg) - with image: + with await get_image_async(image_url) as image: image = image.convert(image_format).resize((height, width)) image_arr = np.array(image, copy=True) @@ -62,9 +56,13 @@ def parse_image( .permute((0, 3, 1, 2)) \ .to(torch.float16) - data = MultiModalData(type=MultiModalData.Type.IMAGE, data=image_tensor) + return MultiModalData(type=MultiModalData.Type.IMAGE, data=image_tensor) - return text, data + +@final +class ConversationMessage(TypedDict): + role: str + content: str class OpenAIServingChat(OpenAIServing): @@ -81,50 +79,59 @@ def __init__(self, self.response_role = response_role self._load_chat_template(chat_template) - async def parse_chat_message_image_input( + def _parse_chat_message_image_input( self, - content: List[Union[ChatCompletionTextContentPart, - ChatCompletionImageContentPart]], - ) -> Tuple[List[str], List[MultiModalData]]: + role: ChatCompletionRole, + content: Iterable[ChatCompletionContentPartParam], + ) -> Tuple[List[ConversationMessage], List[Awaitable[MultiModalData]]]: + """Parse image input defined by OpenAI Chat Completions API.""" config = getattr(self.engine.engine, "vision_language_config", None) if not isinstance(config, VisionLanguageConfig): raise ValueError("GPT-4 with Vision API is only supported for " "vision language models.") tokenizer = self.tokenizer + assert tokenizer is not None - content_texts: List[str] = [] - content_images: List[MultiModalData] = [] + texts: List[str] = [] + image_futures: List[Awaitable[MultiModalData]] = [] for i, part in enumerate(content): - if isinstance(part, ChatCompletionTextContentPart): - content_texts.append(part.text) + if part["type"] == "text": + text = part["text"] - if isinstance(part, ChatCompletionImageContentPart): - image = await get_image_async(part.image_url.url) - image_idx = len(content_images) - text, data = parse_image(config, tokenizer, image, image_idx) + texts.append(text) + elif part["type"] == "image_url": + image_url = part["image_url"] + if image_url.get("detail", "auto") != "auto": + logger.info("content[%s].image_url.detail is ignored", i) - content_texts.append(text) - content_images.append(data) + text = config.image_openai.value.get_image_token_text( + config, tokenizer, image_idx=len(image_futures)) + image_future = get_and_parse_image(image_url["url"], config) - if part.image_url.detail != "auto": - logger.info("content[%s].image_url.detail is ignored", i) + texts.append(text) + image_futures.append(image_future) + else: + raise 
NotImplementedError(f"Unknown part type: {part['type']}") + + messages = [ConversationMessage(role=role, content="\n".join(text))] + data_futures = image_futures - return content_texts, content_images + return messages, data_futures - async def parse_chat_message_content( + def _parse_chat_message_content( self, + role: ChatCompletionRole, content: Optional[Union[str, - List[Union[ChatCompletionTextContentPart, - ChatCompletionImageContentPart]]]], - ) -> Tuple[List[str], List[MultiModalData]]: + Iterable[ChatCompletionContentPartParam]]], + ) -> Tuple[List[ConversationMessage], List[Awaitable[MultiModalData]]]: if content is None: return [], [] if isinstance(content, str): - return [content], [] + return [ConversationMessage(role=role, content=content)], [] - return await self.parse_chat_message_image_input(content) + return self._parse_chat_message_image_input(role, content) async def create_chat_completion( self, request: ChatCompletionRequest, raw_request: Request @@ -144,25 +151,18 @@ async def create_chat_completion( return error_check_ret try: - conversation: List[ChatMessage] = [] - multi_modal_datas: List[MultiModalData] = [] + conversation: List[ConversationMessage] = [] + multi_modal_futures: List[Awaitable[MultiModalData]] = [] for m in request.messages: - text, images = await self.parse_chat_message_content(m.content) - cm = ChatMessage(role=m.role, content="\n".join(text)) + messages, futures = self._parse_chat_message_content( + m["role"], m["content"]) - conversation.append(cm) - multi_modal_datas.extend(images) - - if len(multi_modal_datas) == 0: - multi_modal_data = None - elif len(multi_modal_datas) == 1: - multi_modal_data, = multi_modal_datas - else: - raise NotImplementedError("Multiple image input not supported") + conversation.extend(messages) + multi_modal_futures.extend(futures) prompt = self.tokenizer.apply_chat_template( - conversation=[msg.model_dump() for msg in conversation], + conversation=conversation, tokenize=False, add_generation_prompt=request.add_generation_prompt, ) @@ -171,6 +171,18 @@ async def create_chat_completion( f"Error in applying chat template from request: {str(e)}") return self.create_error_response(str(e)) + try: + if len(multi_modal_futures) == 0: + multi_modal_data = None + elif len(multi_modal_futures) == 1: + multi_modal_data = await multi_modal_futures[0] + else: + # multi_modal_datas = await asyncio.gather(*multi_modal_futures) + raise NotImplementedError("Multiple image input not supported") + except Exception as e: + logger.error(f"Error in loading multi-modal data: {str(e)}") + return self.create_error_response(str(e)) + request_id = f"cmpl-{random_uuid()}" try: # Tokenize/detokenize depending on prompt format (string/token list) @@ -210,8 +222,8 @@ async def create_chat_completion( try: return await self.chat_completion_full_generator( request, - raw_request, conversation, + raw_request, result_generator, request_id, ) @@ -223,12 +235,12 @@ def get_chat_request_role(self, request: ChatCompletionRequest) -> str: if request.add_generation_prompt: return self.response_role else: - return request.messages[-1].role + return request.messages[-1]["role"] async def chat_completion_stream_generator( self, request: ChatCompletionRequest, - parsed_conversation: List[ChatMessage], + conversation: List[ConversationMessage], result_generator: AsyncIterator[RequestOutput], request_id: str, ) -> AsyncGenerator[str, None]: @@ -272,10 +284,9 @@ async def chat_completion_stream_generator( # last message if request.echo: last_msg_content = "" - if 
(parsed_conversation - and parsed_conversation[-1].content - and parsed_conversation[-1].role == role): - last_msg_content = parsed_conversation[-1].content + if (conversation and conversation[-1]["content"] + and conversation[-1]["role"] == role): + last_msg_content = conversation[-1]["content"] if last_msg_content: for i in range(num_choices): @@ -372,8 +383,8 @@ async def chat_completion_stream_generator( async def chat_completion_full_generator( self, request: ChatCompletionRequest, + conversation: List[ConversationMessage], raw_request: Request, - parsed_conversation: List[ChatMessage], result_generator: AsyncIterator[RequestOutput], request_id: str, ) -> Union[ErrorResponse, ChatCompletionResponse]: @@ -416,9 +427,9 @@ async def chat_completion_full_generator( if request.echo: last_msg_content = "" - if (parsed_conversation and parsed_conversation[-1].content - and parsed_conversation[-1].role == role): - last_msg_content = parsed_conversation[-1].content + if (conversation and conversation[-1]["content"] + and conversation[-1]["role"] == role): + last_msg_content = conversation[-1]["content"] for choice in choices: full_message = last_msg_content + choice.message.content @@ -443,23 +454,24 @@ async def chat_completion_full_generator( return response def _load_chat_template(self, chat_template): + tokenizer = self.tokenizer + assert tokenizer is not None + if chat_template is not None: try: with open(chat_template, "r") as f: - self.tokenizer.chat_template = f.read() + tokenizer.chat_template = f.read() except OSError: # If opening a file fails, set chat template to be args to # ensure we decode so our escape are interpreted correctly - self.tokenizer.chat_template = codecs.decode( + tokenizer.chat_template = codecs.decode( chat_template, "unicode_escape") logger.info( - f"Using supplied chat template:\n{self.tokenizer.chat_template}" - ) - elif self.tokenizer.chat_template is not None: + f"Using supplied chat template:\n{tokenizer.chat_template}") + elif tokenizer.chat_template is not None: logger.info( - f"Using default chat template:\n{self.tokenizer.chat_template}" - ) + f"Using default chat template:\n{tokenizer.chat_template}") else: logger.warning( "No chat template provided. 
Chat API will not work.") From ce770f4ef2e99c0b6256ea8e87d17f664e9ea500 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 06:00:00 +0000 Subject: [PATCH 10/45] Use discriminated union in prompt parsing --- vllm/entrypoints/openai/serving_completion.py | 76 ++++++++++++------- 1 file changed, 48 insertions(+), 28 deletions(-) diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index e24aa2489a80..8db79123084e 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -1,6 +1,6 @@ import time from typing import (AsyncGenerator, AsyncIterator, Callable, Dict, List, - Optional, Tuple) + Literal, Optional, Tuple, TypedDict, Union) from fastapi import Request @@ -26,27 +26,45 @@ [TypeTokenIDs, TypeTopLogProbs, Optional[int], int], LogProbs] -def parse_prompt_format(prompt) -> Tuple[bool, list]: +class PromptStrings(TypedDict): + prompt: str + is_tokens: Literal[False] + + +class PromptTokens(TypedDict): + prompt: List[int] + is_tokens: Literal[True] + + +def _parse_prompt_element_format( + elem: Union[str, int, + List[int]]) -> Union[PromptStrings, PromptTokens]: + if isinstance(elem, str): + # case 2: array of strings + return PromptStrings(prompt=elem, is_tokens=False) + if isinstance(elem, int): + # case 3: array of tokens + return PromptTokens(prompt=[elem], is_tokens=True) + if isinstance(elem, list): + # case 4: array of token arrays + return PromptTokens(prompt=elem, is_tokens=True) + + +def parse_prompt_format( + prompt: Union[str, List[str], List[int], List[List[int]]] +) -> List[Union[PromptStrings, PromptTokens]]: # get the prompt, openai supports the following # "a string, array of strings, array of tokens, or array of token arrays." - prompt_is_tokens = False - prompts = [prompt] # case 1: a string + + if isinstance(prompt, str): + # case 1: a string + return [_parse_prompt_element_format(prompt)] + if isinstance(prompt, list): - if len(prompt) == 0: - raise ValueError("please provide at least one prompt") - elif isinstance(prompt[0], str): - prompt_is_tokens = False - prompts = prompt # case 2: array of strings - elif isinstance(prompt[0], int): - prompt_is_tokens = True - prompts = [prompt] # case 3: array of tokens - elif isinstance(prompt[0], list) and isinstance(prompt[0][0], int): - prompt_is_tokens = True - prompts = prompt # case 4: array of token arrays - else: - raise ValueError("prompt must be a string, array of strings, " - "array of tokens, or array of token arrays") - return prompt_is_tokens, prompts + return [_parse_prompt_element_format(elem) for elem in prompt] + + raise ValueError("prompt must be a string, array of strings, " + "array of tokens, or array of token arrays") class OpenAIServingCompletion(OpenAIServing): @@ -84,7 +102,7 @@ async def create_completion(self, request: CompletionRequest, created_time = int(time.time()) # Schedule the request and get the result generator. 
- generators = [] + generators: List[AsyncIterator[RequestOutput]] = [] try: sampling_params = request.to_sampling_params() lora_request = self._maybe_get_lora(request) @@ -96,21 +114,23 @@ async def create_completion(self, request: CompletionRequest, sampling_params.logits_processors = [] sampling_params.logits_processors.append( guided_decode_logit_processor) - prompt_is_tokens, prompts = parse_prompt_format(request.prompt) + + prompts = parse_prompt_format(request.prompt) + truncate_prompt_tokens = sampling_params.truncate_prompt_tokens for i, prompt in enumerate(prompts): - if prompt_is_tokens: + if prompt["is_tokens"]: prompt_formats = self._validate_prompt_and_tokenize( request, - prompt_ids=prompt, - truncate_prompt_tokens=sampling_params. - truncate_prompt_tokens) + prompt_ids=prompt["prompt"], + truncate_prompt_tokens=truncate_prompt_tokens, + ) else: prompt_formats = self._validate_prompt_and_tokenize( request, - prompt=prompt, - truncate_prompt_tokens=sampling_params. - truncate_prompt_tokens) + prompt=prompt["prompt"], + truncate_prompt_tokens=truncate_prompt_tokens, + ) prompt_ids, prompt_text = prompt_formats generators.append( From 6b016bc537e5622995a161ee25e6fc1c91fce396 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 03:26:43 +0000 Subject: [PATCH 11/45] Fix some type errors along the way --- vllm/entrypoints/openai/protocol.py | 20 +++++++++--------- vllm/entrypoints/openai/serving_chat.py | 21 ++++++++++--------- vllm/entrypoints/openai/serving_completion.py | 14 +++++++------ vllm/entrypoints/openai/serving_engine.py | 4 ++-- 4 files changed, 31 insertions(+), 28 deletions(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index f94d22d279cc..c06fc027d3c8 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -30,7 +30,7 @@ class ModelPermission(BaseModel): allow_fine_tuning: bool = False organization: str = "*" group: Optional[str] = None - is_blocking: str = False + is_blocking: bool = False class ModelCard(BaseModel): @@ -56,7 +56,7 @@ class UsageInfo(BaseModel): class ResponseFormat(BaseModel): # type must be "json_object" or "text" - type: str = Literal["text", "json_object"] + type: Literal["text", "json_object"] class ChatCompletionRequest(BaseModel): @@ -339,8 +339,8 @@ class CompletionResponseChoice(BaseModel): index: int text: str logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = Field( + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = Field( default=None, description=( "The stop string or token id that caused the completion " @@ -362,8 +362,8 @@ class CompletionResponseStreamChoice(BaseModel): index: int text: str logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = Field( + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = Field( default=None, description=( "The stop string or token id that caused the completion " @@ -390,8 +390,8 @@ class ChatCompletionResponseChoice(BaseModel): index: int message: ChatMessage logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = None class ChatCompletionResponse(BaseModel): @@ -412,8 +412,8 @@ class 
ChatCompletionResponseStreamChoice(BaseModel): index: int delta: DeltaMessage logprobs: Optional[LogProbs] = None - finish_reason: Optional[Literal["stop", "length"]] = None - stop_reason: Union[None, int, str] = None + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = None class ChatCompletionStreamResponse(BaseModel): diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index a03c5dc88108..1b0758175416 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -102,18 +102,19 @@ def get_chat_request_role(self, request: ChatCompletionRequest) -> str: async def chat_completion_stream_generator( self, request: ChatCompletionRequest, - result_generator: AsyncIterator[RequestOutput], request_id: str - ) -> Union[ErrorResponse, AsyncGenerator[str, None]]: - + result_generator: AsyncIterator[RequestOutput], + request_id: str) -> AsyncGenerator[str, None]: model_name = request.model created_time = int(time.time()) chunk_object_type = "chat.completion.chunk" first_iteration = True # Send response for each token for each request.n (index) - previous_texts = [""] * request.n - previous_num_tokens = [0] * request.n - finish_reason_sent = [False] * request.n + num_choices = 1 if request.n is None else request.n + previous_texts = [""] * num_choices + previous_num_tokens = [0] * num_choices + finish_reason_sent = [False] * num_choices + try: async for res in result_generator: res: RequestOutput @@ -124,7 +125,7 @@ async def chat_completion_stream_generator( # Send first response for each request.n (index) with # the role role = self.get_chat_request_role(request) - for i in range(request.n): + for i in range(num_choices): choice_data = ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage(role=role), @@ -151,19 +152,19 @@ async def chat_completion_stream_generator( last_msg_content = request.messages[-1]["content"] if last_msg_content: - for i in range(request.n): + for i in range(num_choices): choice_data = ( ChatCompletionResponseStreamChoice( index=i, delta=DeltaMessage( content=last_msg_content), + logprobs=None, finish_reason=None)) chunk = ChatCompletionStreamResponse( id=request_id, object=chunk_object_type, created=created_time, choices=[choice_data], - logprobs=None, model=model_name) data = chunk.model_dump_json( exclude_unset=True) @@ -249,7 +250,7 @@ async def chat_completion_full_generator( model_name = request.model created_time = int(time.time()) - final_res: RequestOutput = None + final_res: Optional[RequestOutput] = None async for res in result_generator: if await raw_request.is_disconnected(): diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index 8db79123084e..7fb47ffdc855 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -164,7 +164,7 @@ async def create_completion(self, request: CompletionRequest, num_prompts=len(prompts)) # Non-streaming response - final_res_batch: RequestOutput = [None] * len(prompts) + final_res_batch: List[Optional[RequestOutput]] = [None] * len(prompts) try: async for i, res in result_generator: if await raw_request.is_disconnected(): @@ -201,9 +201,10 @@ async def completion_stream_generator( model_name: str, num_prompts: int, ) -> AsyncGenerator[str, None]: - previous_texts = [""] * request.n * num_prompts - previous_num_tokens = [0] * request.n * num_prompts - has_echoed = [False] * request.n * num_prompts + 
num_choices = 1 if request.n is None else request.n + previous_texts = [""] * num_choices * num_prompts + previous_num_tokens = [0] * num_choices * num_prompts + has_echoed = [False] * num_choices * num_prompts try: async for prompt_idx, res in result_generator: @@ -214,7 +215,7 @@ async def completion_stream_generator( raise StopAsyncIteration() for output in res.outputs: - i = output.index + prompt_idx * request.n + i = output.index + prompt_idx * num_choices # TODO(simon): optimize the performance by avoiding full # text O(n^2) sending. @@ -295,9 +296,10 @@ def request_output_to_completion_response( created_time: int, model_name: str, ) -> CompletionResponse: - choices = [] + choices: List[CompletionResponseChoice] = [] num_prompt_tokens = 0 num_generated_tokens = 0 + for final_res in final_res_batch: assert final_res is not None prompt_token_ids = final_res.prompt_token_ids diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 77a568b56403..f785fb524388 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -151,9 +151,9 @@ def create_streaming_error_response( async def _check_model(self, request) -> Optional[ErrorResponse]: if request.model == self.served_model: - return + return None if request.model in [lora.lora_name for lora in self.lora_requests]: - return + return None return self.create_error_response( message=f"The model `{request.model}` does not exist.", err_type="NotFoundError", From 7620354628a6a63dcf790dd5d7f0521bebcd2f5e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 06:18:56 +0000 Subject: [PATCH 12/45] Some more fixes --- vllm/entrypoints/openai/serving_chat.py | 2 +- vllm/entrypoints/openai/serving_engine.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 1b0758175416..f189fa27d582 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -260,7 +260,7 @@ async def chat_completion_full_generator( final_res = res assert final_res is not None - choices = [] + choices: List[ChatCompletionResponseChoice] = [] role = self.get_chat_request_role(request) for output in final_res.outputs: diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index f785fb524388..a215e498ae63 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -30,7 +30,7 @@ class OpenAIServing: def __init__(self, engine: AsyncLLMEngine, served_model: str, - lora_modules=Optional[List[LoRA]]): + lora_modules: Optional[List[LoRA]]): self.engine = engine self.served_model = served_model if lora_modules is None: From 7c3e6d91b332227eb5892de2ba10cc97b6167499 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 06:24:34 +0000 Subject: [PATCH 13/45] Apply formatter --- vllm/entrypoints/openai/serving_engine.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index a215e498ae63..b2d055bea352 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -27,9 +27,7 @@ class LoRA: class OpenAIServing: - def __init__(self, - engine: AsyncLLMEngine, - served_model: str, + def __init__(self, engine: AsyncLLMEngine, served_model: str, lora_modules: Optional[List[LoRA]]): self.engine = engine 
self.served_model = served_model From 9925dcb8ee306eba0bf36f6da40377577945114e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 07:35:32 +0000 Subject: [PATCH 14/45] Move `openai` to common requirements --- requirements-common.txt | 9 ++++++--- requirements-dev.txt | 1 - 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/requirements-common.txt b/requirements-common.txt index cb766b2fe3c2..c2a8c13a4a9c 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -8,8 +8,11 @@ py-cpuinfo transformers >= 4.39.1 # Required for StarCoder2 & Llava. fastapi uvicorn[standard] -pydantic >= 2.0 # Required for OpenAI server. -pillow # Required for OpenAI image input prometheus_client >= 0.18.0 tiktoken == 0.6.0 # Required for DBRX tokenizer -outlines == 0.0.34 # Requires torch >= 2.1.0 \ No newline at end of file +outlines == 0.0.34 # Requires torch >= 2.1.0 + +# OpenAI server +openai +pydantic >= 2.0 +pillow diff --git a/requirements-dev.txt b/requirements-dev.txt index b806c924a4e0..54e8de8d0e96 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -20,7 +20,6 @@ pytest-rerunfailures pytest-shard httpx einops # required for MPT -openai requests ray peft From ceb4e353118909d33dfd42186e5ec69ee848249d Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 07:58:07 +0000 Subject: [PATCH 15/45] Fix typo in `_parse_chat_message_image_input` --- vllm/entrypoints/openai/serving_chat.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index d42757651bfb..ef2ae49e10c7 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -59,7 +59,7 @@ async def get_and_parse_image(image_url: str, return MultiModalData(type=MultiModalData.Type.IMAGE, data=image_tensor) -@final +@final # So that it should be compatible with Dict[str, str] class ConversationMessage(TypedDict): role: str content: str @@ -115,7 +115,7 @@ def _parse_chat_message_image_input( else: raise NotImplementedError(f"Unknown part type: {part['type']}") - messages = [ConversationMessage(role=role, content="\n".join(text))] + messages = [ConversationMessage(role=role, content="\n".join(texts))] data_futures = image_futures return messages, data_futures From 7bdc84eb4dbf482d7540d56289712b516ebd4451 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 09:55:44 +0000 Subject: [PATCH 16/45] Refactor prompt parsing so that it can be shared between Chat Completions API and legacy Completions API --- vllm/entrypoints/openai/serving_chat.py | 23 ++-- vllm/entrypoints/openai/serving_completion.py | 89 +++----------- vllm/entrypoints/openai/serving_engine.py | 116 +++++++++++++++++- 3 files changed, 145 insertions(+), 83 deletions(-) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index f189fa27d582..58856bd96f9f 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -10,7 +10,8 @@ ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse, UsageInfo) -from vllm.entrypoints.openai.serving_engine import LoRA, OpenAIServing +from vllm.entrypoints.openai.serving_engine import (LoRAModulePath, + OpenAIServing) from vllm.logger import init_logger from vllm.model_executor.guided_decoding import ( get_guided_decoding_logits_processor) @@ -26,7 +27,7 @@ def __init__(self, engine: 
AsyncLLMEngine, served_model: str, response_role: str, - lora_modules: Optional[List[LoRA]] = None, + lora_modules: Optional[List[LoRAModulePath]] = None, chat_template=None): super().__init__(engine=engine, served_model=served_model, @@ -63,9 +64,6 @@ async def create_chat_completion( request_id = f"cmpl-{random_uuid()}" try: - # Tokenize/detokenize depending on prompt format (string/token list) - prompt_ids, prompt_text = self._validate_prompt_and_tokenize( - request, prompt=prompt) sampling_params = request.to_sampling_params() lora_request = self._maybe_get_lora(request) guided_decode_logits_processor = ( @@ -76,12 +74,21 @@ async def create_chat_completion( sampling_params.logits_processors = [] sampling_params.logits_processors.append( guided_decode_logits_processor) + + prompt_ids, prompt_text = self._tokenize_input_text( + request, + prompt, + truncate_prompt_tokens=sampling_params.truncate_prompt_tokens, + ) + + result_generator = self.engine.generate(prompt_text, + sampling_params, + request_id, prompt_ids, + lora_request) except ValueError as e: + # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) - result_generator = self.engine.generate(prompt_text, sampling_params, - request_id, prompt_ids, - lora_request) # Streaming response if request.stream: return self.chat_completion_stream_generator( diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index 7fb47ffdc855..8b0bec7fbda6 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -1,6 +1,6 @@ import time from typing import (AsyncGenerator, AsyncIterator, Callable, Dict, List, - Literal, Optional, Tuple, TypedDict, Union) + Optional, Tuple) from fastapi import Request @@ -11,7 +11,8 @@ CompletionResponseStreamChoice, CompletionStreamResponse, LogProbs, UsageInfo) -from vllm.entrypoints.openai.serving_engine import LoRA, OpenAIServing +from vllm.entrypoints.openai.serving_engine import (LoRAModulePath, + OpenAIServing) from vllm.logger import init_logger from vllm.model_executor.guided_decoding import ( get_guided_decoding_logits_processor) @@ -26,53 +27,12 @@ [TypeTokenIDs, TypeTopLogProbs, Optional[int], int], LogProbs] -class PromptStrings(TypedDict): - prompt: str - is_tokens: Literal[False] - - -class PromptTokens(TypedDict): - prompt: List[int] - is_tokens: Literal[True] - - -def _parse_prompt_element_format( - elem: Union[str, int, - List[int]]) -> Union[PromptStrings, PromptTokens]: - if isinstance(elem, str): - # case 2: array of strings - return PromptStrings(prompt=elem, is_tokens=False) - if isinstance(elem, int): - # case 3: array of tokens - return PromptTokens(prompt=[elem], is_tokens=True) - if isinstance(elem, list): - # case 4: array of token arrays - return PromptTokens(prompt=elem, is_tokens=True) - - -def parse_prompt_format( - prompt: Union[str, List[str], List[int], List[List[int]]] -) -> List[Union[PromptStrings, PromptTokens]]: - # get the prompt, openai supports the following - # "a string, array of strings, array of tokens, or array of token arrays." 
- - if isinstance(prompt, str): - # case 1: a string - return [_parse_prompt_element_format(prompt)] - - if isinstance(prompt, list): - return [_parse_prompt_element_format(elem) for elem in prompt] - - raise ValueError("prompt must be a string, array of strings, " - "array of tokens, or array of token arrays") - - class OpenAIServingCompletion(OpenAIServing): def __init__(self, engine: AsyncLLMEngine, served_model: str, - lora_modules: Optional[List[LoRA]] = None): + lora_modules: Optional[List[LoRAModulePath]] = None): super().__init__(engine=engine, served_model=served_model, lora_modules=lora_modules) @@ -115,24 +75,13 @@ async def create_completion(self, request: CompletionRequest, sampling_params.logits_processors.append( guided_decode_logit_processor) - prompts = parse_prompt_format(request.prompt) - truncate_prompt_tokens = sampling_params.truncate_prompt_tokens - - for i, prompt in enumerate(prompts): - if prompt["is_tokens"]: - prompt_formats = self._validate_prompt_and_tokenize( - request, - prompt_ids=prompt["prompt"], - truncate_prompt_tokens=truncate_prompt_tokens, - ) - else: - prompt_formats = self._validate_prompt_and_tokenize( + for i, (prompt_ids, prompt_text) in enumerate( + self._tokenize_input_text_or_texts( request, - prompt=prompt["prompt"], - truncate_prompt_tokens=truncate_prompt_tokens, - ) - prompt_ids, prompt_text = prompt_formats - + request.prompt, + truncate_prompt_tokens=sampling_params. + truncate_prompt_tokens, + )): generators.append( self.engine.generate(prompt_text, sampling_params, @@ -155,16 +104,18 @@ async def create_completion(self, request: CompletionRequest, # Streaming response if stream: - return self.completion_stream_generator(request, - raw_request, - result_generator, - request_id, - created_time, - model_name, - num_prompts=len(prompts)) + return self.completion_stream_generator( + request, + raw_request, + result_generator, + request_id, + created_time, + model_name, + num_prompts=len(generators)) # Non-streaming response - final_res_batch: List[Optional[RequestOutput]] = [None] * len(prompts) + final_res_batch: List[Optional[RequestOutput]] = [None + ] * len(generators) try: async for i, res in result_generator: if await raw_request.is_disconnected(): diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index b2d055bea352..0e896c455c27 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -2,7 +2,8 @@ import json from dataclasses import dataclass from http import HTTPStatus -from typing import Dict, List, Optional, Tuple, Union +from typing import (Dict, Iterable, Iterator, List, Literal, Optional, Tuple, + TypedDict, Union) from pydantic import conint @@ -19,8 +20,18 @@ logger = init_logger(__name__) +class InputStrings(TypedDict): + input_text: str + is_tokens: Literal[False] + + +class InputTokens(TypedDict): + input_text: List[int] + is_tokens: Literal[True] + + @dataclass -class LoRA: +class LoRAModulePath: name: str local_path: str @@ -28,7 +39,7 @@ class LoRA: class OpenAIServing: def __init__(self, engine: AsyncLLMEngine, served_model: str, - lora_modules: Optional[List[LoRA]]): + lora_modules: Optional[List[LoRAModulePath]]): self.engine = engine self.served_model = served_model if lora_modules is None: @@ -147,7 +158,9 @@ def create_streaming_error_response( }) return json_str - async def _check_model(self, request) -> Optional[ErrorResponse]: + async def _check_model( + self, request: Union[CompletionRequest, 
ChatCompletionRequest] + ) -> Optional[ErrorResponse]: if request.model == self.served_model: return None if request.model in [lora.lora_name for lora in self.lora_requests]: @@ -157,9 +170,11 @@ async def _check_model(self, request) -> Optional[ErrorResponse]: err_type="NotFoundError", status_code=HTTPStatus.NOT_FOUND) - def _maybe_get_lora(self, request) -> Optional[LoRARequest]: + def _maybe_get_lora( + self, request: Union[CompletionRequest, ChatCompletionRequest] + ) -> Optional[LoRARequest]: if request.model == self.served_model: - return + return None for lora in self.lora_requests: if request.model == lora.lora_name: return lora @@ -207,3 +222,92 @@ def _validate_prompt_and_tokenize( f"Please reduce the length of the messages or completion.", ) else: return input_ids, input_text + + # https://platform.openai.com/docs/api-reference/embeddings/create + def _tokenize_input_text( + self, + request: Union[ChatCompletionRequest, CompletionRequest], + input_text: Union[str, List[int]], + truncate_prompt_tokens: Optional[conint(ge=1)] = None, + ) -> Tuple[List[int], str]: + return next( + self._tokenize_input_texts( + request, + [input_text], + truncate_prompt_tokens=truncate_prompt_tokens, + )) + + def _tokenize_input_texts( + self, + request: Union[ChatCompletionRequest, CompletionRequest], + input_texts: Iterable[Union[str, List[int]]], + truncate_prompt_tokens: Optional[conint(ge=1)] = None, + ) -> Iterator[Tuple[List[int], str]]: + for input_text in input_texts: + if isinstance(input_text, str): + yield self._validate_prompt_and_tokenize( + request, + prompt=input_text, + truncate_prompt_tokens=truncate_prompt_tokens, + ) + else: + yield self._validate_prompt_and_tokenize( + request, + prompt_ids=input_text, + truncate_prompt_tokens=truncate_prompt_tokens, + ) + + def _parse_input_element( + self, + elem: Union[str, int, List[int]], + ) -> Union[InputStrings, InputTokens]: + if isinstance(elem, str): + # case 2: array of strings + return InputStrings(prompt=elem, is_tokens=False) + if isinstance(elem, int): + # case 3: array of tokens + return InputTokens(prompt=[elem], is_tokens=True) + if isinstance(elem, list): + # case 4: array of token arrays + return InputTokens(prompt=elem, is_tokens=True) + + def _parse_input_text_or_texts( + self, + input_text_or_texts: Union[str, List[str], List[int], List[List[int]]], + ) -> List[Union[InputStrings, InputTokens]]: + # get the prompt, openai supports the following: + # a string, array of strings, array of tokens, or array of token arrays + + if isinstance(input_text_or_texts, str): + # case 1: a string + return [self._parse_input_element(input_text_or_texts)] + + if isinstance(input_text_or_texts, list): + return [self._parse_input_element(e) for e in input_text_or_texts] + + raise ValueError("prompt must be a string, array of strings, " + "array of tokens, or array of token arrays") + + def _tokenize_input_text_or_texts( + self, + request: Union[ChatCompletionRequest, CompletionRequest], + input_text_or_texts: Union[str, List[str], List[int], List[List[int]]], + truncate_prompt_tokens: Optional[conint(ge=1)] = None, + ) -> Iterator[Tuple[List[int], str]]: + for input_ in self._parse_input_text_or_texts(input_text_or_texts): + # Although our type checking is based on mypy, + # VSCode Pyright extension should still work properly + # "is True" is required for Pyright to perform type narrowing + # See: https://github.com/microsoft/pyright/issues/7672 + if input_["is_tokens"] is True: + yield self._validate_prompt_and_tokenize( + request, + 
prompt_ids=input_["input_text"], + truncate_prompt_tokens=truncate_prompt_tokens, + ) + else: + yield self._validate_prompt_and_tokenize( + request, + prompt=input_["input_text"], + truncate_prompt_tokens=truncate_prompt_tokens, + ) From a7d109853a93dd8114c984ae18bf9122e14c4128 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 10:02:39 +0000 Subject: [PATCH 17/45] Make code more readable --- vllm/entrypoints/openai/serving_chat.py | 18 +++++----- vllm/entrypoints/openai/serving_completion.py | 34 +++++++++---------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 58856bd96f9f..664899e262c0 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -124,7 +124,6 @@ async def chat_completion_stream_generator( try: async for res in result_generator: - res: RequestOutput # We need to do it here, because if there are exceptions in # the result_generator, it needs to be sent as the FIRST # response (by the try...catch). @@ -322,24 +321,25 @@ async def chat_completion_full_generator( return response - def _load_chat_template(self, chat_template): + def _load_chat_template(self, chat_template: str): + tokenizer = self.tokenizer + assert tokenizer is not None + if chat_template is not None: try: with open(chat_template, "r") as f: - self.tokenizer.chat_template = f.read() + tokenizer.chat_template = f.read() except OSError: # If opening a file fails, set chat template to be args to # ensure we decode so our escape are interpreted correctly - self.tokenizer.chat_template = codecs.decode( + tokenizer.chat_template = codecs.decode( chat_template, "unicode_escape") logger.info( - f"Using supplied chat template:\n{self.tokenizer.chat_template}" - ) - elif self.tokenizer.chat_template is not None: + f"Using supplied chat template:\n{tokenizer.chat_template}") + elif tokenizer.chat_template is not None: logger.info( - f"Using default chat template:\n{self.tokenizer.chat_template}" - ) + f"Using default chat template:\n{tokenizer.chat_template}") else: logger.warning( "No chat template provided. Chat API will not work.") diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index 8b0bec7fbda6..7f1ebd53e85e 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -75,13 +75,15 @@ async def create_completion(self, request: CompletionRequest, sampling_params.logits_processors.append( guided_decode_logit_processor) - for i, (prompt_ids, prompt_text) in enumerate( - self._tokenize_input_text_or_texts( - request, - request.prompt, - truncate_prompt_tokens=sampling_params. - truncate_prompt_tokens, - )): + prompts = list( + self._tokenize_input_text_or_texts( + request, + request.prompt, + truncate_prompt_tokens=sampling_params. 
+ truncate_prompt_tokens, + )) + + for i, (prompt_ids, prompt_text) in enumerate(prompts): generators.append( self.engine.generate(prompt_text, sampling_params, @@ -104,18 +106,16 @@ async def create_completion(self, request: CompletionRequest, # Streaming response if stream: - return self.completion_stream_generator( - request, - raw_request, - result_generator, - request_id, - created_time, - model_name, - num_prompts=len(generators)) + return self.completion_stream_generator(request, + raw_request, + result_generator, + request_id, + created_time, + model_name, + num_prompts=len(prompts)) # Non-streaming response - final_res_batch: List[Optional[RequestOutput]] = [None - ] * len(generators) + final_res_batch: List[Optional[RequestOutput]] = [None] * len(prompts) try: async for i, res in result_generator: if await raw_request.is_disconnected(): From 8b9d6368846e3c12cb591e4f71a37d746a759d49 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 10:08:50 +0000 Subject: [PATCH 18/45] Move assertion to a more appropriate place --- vllm/entrypoints/openai/serving_completion.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index 7f1ebd53e85e..10e368ecfd7b 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -123,8 +123,19 @@ async def create_completion(self, request: CompletionRequest, await self.engine.abort(f"{request_id}-{i}") return self.create_error_response("Client disconnected") final_res_batch[i] = res + + final_res_batch_checked: List[RequestOutput] = [] + for final_res in final_res_batch: + assert final_res is not None + final_res_batch_checked.append(final_res) + response = self.request_output_to_completion_response( - final_res_batch, request, request_id, created_time, model_name) + final_res_batch_checked, + request, + request_id, + created_time, + model_name, + ) except ValueError as e: # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) @@ -252,7 +263,6 @@ def request_output_to_completion_response( num_generated_tokens = 0 for final_res in final_res_batch: - assert final_res is not None prompt_token_ids = final_res.prompt_token_ids prompt_logprobs = final_res.prompt_logprobs prompt_text = final_res.prompt From c48c13a4bc1e5f49a6b729fb177b81347e457466 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 10:18:47 +0000 Subject: [PATCH 19/45] Add code documentation --- vllm/entrypoints/openai/serving_engine.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 0e896c455c27..d04a2af9ba74 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -223,13 +223,15 @@ def _validate_prompt_and_tokenize( else: return input_ids, input_text - # https://platform.openai.com/docs/api-reference/embeddings/create def _tokenize_input_text( self, request: Union[ChatCompletionRequest, CompletionRequest], input_text: Union[str, List[int]], truncate_prompt_tokens: Optional[conint(ge=1)] = None, ) -> Tuple[List[int], str]: + """A simpler implementation of + :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_input_text_or_texts` + that assumes single input.""" return next( self._tokenize_input_texts( request, @@ -243,6 +245,9 @@ def _tokenize_input_texts( input_texts: 
Iterable[Union[str, List[int]]], truncate_prompt_tokens: Optional[conint(ge=1)] = None, ) -> Iterator[Tuple[List[int], str]]: + """A simpler implementation of + :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_input_text_or_texts` + that assumes multiple input.""" for input_text in input_texts: if isinstance(input_text, str): yield self._validate_prompt_and_tokenize( @@ -275,9 +280,6 @@ def _parse_input_text_or_texts( self, input_text_or_texts: Union[str, List[str], List[int], List[List[int]]], ) -> List[Union[InputStrings, InputTokens]]: - # get the prompt, openai supports the following: - # a string, array of strings, array of tokens, or array of token arrays - if isinstance(input_text_or_texts, str): # case 1: a string return [self._parse_input_element(input_text_or_texts)] @@ -294,6 +296,12 @@ def _tokenize_input_text_or_texts( input_text_or_texts: Union[str, List[str], List[int], List[List[int]]], truncate_prompt_tokens: Optional[conint(ge=1)] = None, ) -> Iterator[Tuple[List[int], str]]: + """Tokenize/detokenize depending on the input format. + + According to `OpenAI API `_ + , each input can be a string or array of tokens. Note that each request + can pass one or more inputs. + """ for input_ in self._parse_input_text_or_texts(input_text_or_texts): # Although our type checking is based on mypy, # VSCode Pyright extension should still work properly From 35303626d45f7f7257ac3f5d385758cd52e94270 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 10:47:36 +0000 Subject: [PATCH 20/45] Decompose `_validate_prompt_and_tokenize` --- vllm/entrypoints/openai/serving_chat.py | 2 +- vllm/entrypoints/openai/serving_completion.py | 2 +- vllm/entrypoints/openai/serving_engine.py | 146 +++++++++++------- 3 files changed, 89 insertions(+), 61 deletions(-) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 664899e262c0..0b30366d716d 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -75,7 +75,7 @@ async def create_chat_completion( sampling_params.logits_processors.append( guided_decode_logits_processor) - prompt_ids, prompt_text = self._tokenize_input_text( + prompt_ids, prompt_text = self._tokenize_prompt_input( request, prompt, truncate_prompt_tokens=sampling_params.truncate_prompt_tokens, diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index 10e368ecfd7b..f80d1bd5c448 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -76,7 +76,7 @@ async def create_completion(self, request: CompletionRequest, guided_decode_logit_processor) prompts = list( - self._tokenize_input_text_or_texts( + self._tokenize_prompt_input_or_inputs( request, request.prompt, truncate_prompt_tokens=sampling_params. 
diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index d04a2af9ba74..7b3bbcc4c7b9 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -6,6 +6,7 @@ TypedDict, Union) from pydantic import conint +from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, @@ -20,13 +21,13 @@ logger = init_logger(__name__) -class InputStrings(TypedDict): - input_text: str +class InputString(TypedDict): + text: str is_tokens: Literal[False] class InputTokens(TypedDict): - input_text: List[int] + text: List[int] is_tokens: Literal[True] @@ -181,32 +182,48 @@ def _maybe_get_lora( # if _check_model has been called earlier, this will be unreachable raise ValueError("The model `{request.model}` does not exist.") - def _validate_prompt_and_tokenize( + def _normalize_prompt_text_to_input( self, request: Union[ChatCompletionRequest, CompletionRequest], - prompt: Optional[str] = None, - prompt_ids: Optional[List[int]] = None, + prompt: str, + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], truncate_prompt_tokens: Optional[conint(ge=1)] = None ) -> Tuple[List[int], str]: - if not (prompt or prompt_ids): - raise ValueError("Either prompt or prompt_ids should be provided.") - if (prompt and prompt_ids): - raise ValueError( - "Only one of prompt or prompt_ids should be provided.") - - if prompt_ids is None: - tokenizer_kwargs = {} if truncate_prompt_tokens is None else { - "truncation": True, - "max_length": truncate_prompt_tokens, - } - input_ids = self.tokenizer(prompt, **tokenizer_kwargs).input_ids - elif truncate_prompt_tokens is not None: - input_ids = prompt_ids[-truncate_prompt_tokens:] + if truncate_prompt_tokens is None: + encoded = tokenizer(prompt) else: + encoded = tokenizer(prompt, + truncation=True, + max_length=truncate_prompt_tokens) + + input_ids = encoded.input_ids + + input_text = prompt + + return self._validate_input(request, input_ids, input_text) + + def _normalize_prompt_tokens_to_input( + self, + request: Union[ChatCompletionRequest, CompletionRequest], + prompt_ids: List[int], + tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], + truncate_prompt_tokens: Optional[conint(ge=1)] = None + ) -> Tuple[List[int], str]: + if truncate_prompt_tokens is None: input_ids = prompt_ids + else: + input_ids = prompt_ids[-truncate_prompt_tokens:] + + input_text = tokenizer.decode(prompt_ids) - input_text = prompt if prompt is not None else self.tokenizer.decode( - prompt_ids) + return self._validate_input(request, input_ids, input_text) + + def _validate_input( + self, + request: Union[ChatCompletionRequest, CompletionRequest], + input_ids: List[int], + input_text: str, + ) -> Tuple[List[int], str]: token_num = len(input_ids) if request.max_tokens is None: @@ -220,80 +237,85 @@ def _validate_prompt_and_tokenize( f"({token_num} in the messages, " f"{request.max_tokens} in the completion). 
" f"Please reduce the length of the messages or completion.", ) - else: - return input_ids, input_text - def _tokenize_input_text( + return input_ids, input_text + + def _tokenize_prompt_input( self, request: Union[ChatCompletionRequest, CompletionRequest], - input_text: Union[str, List[int]], + prompt_input: Union[str, List[int]], truncate_prompt_tokens: Optional[conint(ge=1)] = None, ) -> Tuple[List[int], str]: """A simpler implementation of - :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_input_text_or_texts` + :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs` that assumes single input.""" return next( - self._tokenize_input_texts( + self._tokenize_prompt_inputs( request, - [input_text], + [prompt_input], truncate_prompt_tokens=truncate_prompt_tokens, )) - def _tokenize_input_texts( + def _tokenize_prompt_inputs( self, request: Union[ChatCompletionRequest, CompletionRequest], - input_texts: Iterable[Union[str, List[int]]], + prompt_inputs: Iterable[Union[str, List[int]]], truncate_prompt_tokens: Optional[conint(ge=1)] = None, ) -> Iterator[Tuple[List[int], str]]: """A simpler implementation of - :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_input_text_or_texts` - that assumes multiple input.""" - for input_text in input_texts: - if isinstance(input_text, str): - yield self._validate_prompt_and_tokenize( + :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs` + that assumes multiple inputs.""" + tokenizer = self.tokenizer + assert tokenizer is not None + + for text in prompt_inputs: + if isinstance(text, str): + yield self._normalize_prompt_text_to_input( request, - prompt=input_text, + prompt=text, + tokenizer=tokenizer, truncate_prompt_tokens=truncate_prompt_tokens, ) else: - yield self._validate_prompt_and_tokenize( + yield self._normalize_prompt_tokens_to_input( request, - prompt_ids=input_text, + prompt_ids=text, + tokenizer=tokenizer, truncate_prompt_tokens=truncate_prompt_tokens, ) - def _parse_input_element( + def _parse_prompt_element( self, elem: Union[str, int, List[int]], - ) -> Union[InputStrings, InputTokens]: + ) -> Union[InputString, InputTokens]: if isinstance(elem, str): # case 2: array of strings - return InputStrings(prompt=elem, is_tokens=False) + return InputString(text=elem, is_tokens=False) if isinstance(elem, int): # case 3: array of tokens - return InputTokens(prompt=[elem], is_tokens=True) + return InputTokens(text=[elem], is_tokens=True) if isinstance(elem, list): # case 4: array of token arrays - return InputTokens(prompt=elem, is_tokens=True) + return InputTokens(text=elem, is_tokens=True) - def _parse_input_text_or_texts( + def _parse_prompt_input_or_inputs( self, - input_text_or_texts: Union[str, List[str], List[int], List[List[int]]], - ) -> List[Union[InputStrings, InputTokens]]: - if isinstance(input_text_or_texts, str): + input_or_inputs: Union[str, List[str], List[int], List[List[int]]], + ) -> List[Union[InputString, InputTokens]]: + if isinstance(input_or_inputs, str): # case 1: a string - return [self._parse_input_element(input_text_or_texts)] + return [self._parse_prompt_element(input_or_inputs)] - if isinstance(input_text_or_texts, list): - return [self._parse_input_element(e) for e in input_text_or_texts] + if isinstance(input_or_inputs, list): + return [self._parse_prompt_element(e) for e in input_or_inputs] raise ValueError("prompt must be a string, array of strings, " "array of tokens, or array of token 
arrays") - def _tokenize_input_text_or_texts( + def _tokenize_prompt_input_or_inputs( self, request: Union[ChatCompletionRequest, CompletionRequest], - input_text_or_texts: Union[str, List[str], List[int], List[List[int]]], + input_or_inputs: Union[str, List[str], List[int], List[List[int]]], truncate_prompt_tokens: Optional[conint(ge=1)] = None, ) -> Iterator[Tuple[List[int], str]]: """Tokenize/detokenize depending on the input format. @@ -302,20 +324,26 @@ def _tokenize_input_text_or_texts( , each input can be a string or array of tokens. Note that each request can pass one or more inputs. """ - for input_ in self._parse_input_text_or_texts(input_text_or_texts): + tokenizer = self.tokenizer + assert tokenizer is not None + + for prompt_input in self._parse_prompt_input_or_inputs( + input_or_inputs): # Although our type checking is based on mypy, # VSCode Pyright extension should still work properly # "is True" is required for Pyright to perform type narrowing # See: https://github.com/microsoft/pyright/issues/7672 - if input_["is_tokens"] is True: - yield self._validate_prompt_and_tokenize( + if prompt_input["is_tokens"] is False: + yield self._normalize_prompt_text_to_input( request, - prompt_ids=input_["input_text"], + prompt=prompt_input["text"], + tokenizer=tokenizer, truncate_prompt_tokens=truncate_prompt_tokens, ) else: - yield self._validate_prompt_and_tokenize( + yield self._normalize_prompt_tokens_to_input( request, - prompt=input_["input_text"], + prompt_ids=prompt_input["text"], + tokenizer=tokenizer, truncate_prompt_tokens=truncate_prompt_tokens, ) From b8feec974209f87c78a93a18972e9c75fd68945e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 12 Apr 2024 19:08:00 +0800 Subject: [PATCH 21/45] Fix missing import due to renaming --- vllm/entrypoints/openai/cli_args.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/entrypoints/openai/cli_args.py b/vllm/entrypoints/openai/cli_args.py index cc71931b9795..0bd15b667c65 100644 --- a/vllm/entrypoints/openai/cli_args.py +++ b/vllm/entrypoints/openai/cli_args.py @@ -9,7 +9,7 @@ import ssl from vllm.engine.arg_utils import AsyncEngineArgs -from vllm.entrypoints.openai.serving_engine import LoRA +from vllm.entrypoints.openai.serving_engine import LoRAModulePath class LoRAParserAction(argparse.Action): @@ -18,7 +18,7 @@ def __call__(self, parser, namespace, values, option_string=None): lora_list = [] for item in values: name, path = item.split('=') - lora_list.append(LoRA(name, path)) + lora_list.append(LoRAModulePath(name, path)) setattr(namespace, self.dest, lora_list) From cc1a5b3eeccaa50419082fc9cd80a8ac662b1fdc Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sat, 13 Apr 2024 04:38:57 +0000 Subject: [PATCH 22/45] Fix bug when parsing array of tokens --- vllm/entrypoints/openai/serving_engine.py | 39 +++++++++++++---------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 7b3bbcc4c7b9..4f5258b048c4 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -3,7 +3,7 @@ from dataclasses import dataclass from http import HTTPStatus from typing import (Dict, Iterable, Iterator, List, Literal, Optional, Tuple, - TypedDict, Union) + TypedDict, Union, cast) from pydantic import conint from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast @@ -284,30 +284,35 @@ def _tokenize_prompt_inputs( truncate_prompt_tokens=truncate_prompt_tokens, ) - def 
_parse_prompt_element( - self, - elem: Union[str, int, List[int]], - ) -> Union[InputString, InputTokens]: - if isinstance(elem, str): - # case 2: array of strings - return InputString(text=elem, is_tokens=False) - if isinstance(elem, int): - # case 3: array of tokens - return InputTokens(text=[elem], is_tokens=True) - if isinstance(elem, list): - # case 4: array of token arrays - return InputTokens(text=elem, is_tokens=True) - def _parse_prompt_input_or_inputs( self, input_or_inputs: Union[str, List[str], List[int], List[List[int]]], ) -> List[Union[InputString, InputTokens]]: if isinstance(input_or_inputs, str): # case 1: a string - return [self._parse_prompt_element(input_or_inputs)] + elem = input_or_inputs + return [InputString(text=elem, is_tokens=False)] if isinstance(input_or_inputs, list): - return [self._parse_prompt_element(e) for e in input_or_inputs] + if len(input_or_inputs) == 0: + raise ValueError("please provide at least one prompt") + if isinstance(input_or_inputs[0], str): + # case 2: array of strings + return [ + InputString(text=elem, is_tokens=False) + for elem in cast(List[str], input_or_inputs) + ] + if isinstance(input_or_inputs[0], int): + # case 3: array of tokens + elem = cast(List[int], input_or_inputs) + return [InputTokens(text=elem, is_tokens=True)] + if isinstance(input_or_inputs[0], list) and isinstance( + input_or_inputs[0][0], int): + # case 4: array of token arrays + return [ + InputTokens(text=elem, is_tokens=True) + for elem in cast(List[List[int]], input_or_inputs) + ] raise ValueError("prompt must be a string, array of strings, " "array of tokens, or array of token arrays") From f9c1135e353921d5e00d22f177a5516c9317d87a Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sat, 13 Apr 2024 05:48:37 +0000 Subject: [PATCH 23/45] Add token array to batch completions testing --- tests/entrypoints/test_openai_server.py | 92 +++++++++++++------------ 1 file changed, 47 insertions(+), 45 deletions(-) diff --git a/tests/entrypoints/test_openai_server.py b/tests/entrypoints/test_openai_server.py index 7940430b8b65..d83692d5cbdb 100644 --- a/tests/entrypoints/test_openai_server.py +++ b/tests/entrypoints/test_openai_server.py @@ -413,50 +413,52 @@ async def test_chat_streaming(server, client: openai.AsyncOpenAI, ) async def test_batch_completions(server, client: openai.AsyncOpenAI, model_name: str): - # test simple list - batch = await client.completions.create( - model=model_name, - prompt=["Hello, my name is", "Hello, my name is"], - max_tokens=5, - temperature=0.0, - ) - assert len(batch.choices) == 2 - assert batch.choices[0].text == batch.choices[1].text - - # test n = 2 - batch = await client.completions.create( - model=model_name, - prompt=["Hello, my name is", "Hello, my name is"], - n=2, - max_tokens=5, - temperature=0.0, - extra_body=dict( - # NOTE: this has to be true for n > 1 in vLLM, but not necessary - # for official client. 
- use_beam_search=True), - ) - assert len(batch.choices) == 4 - assert batch.choices[0].text != batch.choices[ - 1].text, "beam search should be different" - assert batch.choices[0].text == batch.choices[ - 2].text, "two copies of the same prompt should be the same" - assert batch.choices[1].text == batch.choices[ - 3].text, "two copies of the same prompt should be the same" - - # test streaming - batch = await client.completions.create( - model=model_name, - prompt=["Hello, my name is", "Hello, my name is"], - max_tokens=5, - temperature=0.0, - stream=True, - ) - texts = [""] * 2 - async for chunk in batch: - assert len(chunk.choices) == 1 - choice = chunk.choices[0] - texts[choice.index] += choice.text - assert texts[0] == texts[1] + # test using text and token IDs + for prompts in (["Hello, my name is"] * 2, [[0, 0, 0, 0, 0]] * 2): + # test simple list + batch = await client.completions.create( + model=model_name, + prompt=prompts, + max_tokens=5, + temperature=0.0, + ) + assert len(batch.choices) == 2 + assert batch.choices[0].text == batch.choices[1].text + + # test n = 2 + batch = await client.completions.create( + model=model_name, + prompt=prompts, + n=2, + max_tokens=5, + temperature=0.0, + extra_body=dict( + # NOTE: this has to be true for n > 1 in vLLM, but not necessary + # for official client. + use_beam_search=True), + ) + assert len(batch.choices) == 4 + assert batch.choices[0].text != batch.choices[ + 1].text, "beam search should be different" + assert batch.choices[0].text == batch.choices[ + 2].text, "two copies of the same prompt should be the same" + assert batch.choices[1].text == batch.choices[ + 3].text, "two copies of the same prompt should be the same" + + # test streaming + batch = await client.completions.create( + model=model_name, + prompt=prompts, + max_tokens=5, + temperature=0.0, + stream=True, + ) + texts = [""] * 2 + async for chunk in batch: + assert len(chunk.choices) == 1 + choice = chunk.choices[0] + texts[choice.index] += choice.text + assert texts[0] == texts[1] async def test_logits_bias(server, client: openai.AsyncOpenAI): @@ -762,7 +764,7 @@ async def test_echo_logprob_completion(server, client: openai.AsyncOpenAI, prompt_text = tokenizer.decode(prompt) if isinstance(prompt, list) else prompt assert (completion.choices[0].text is not None - and re.search(r"^" + prompt_text, completion.choices[0].text)) + and completion.choices[0].text.startswith(prompt_text)) logprobs = completion.choices[0].logprobs assert logprobs is not None assert len(logprobs.text_offset) > 5 From f2e818055e31170a7d726498464791777f0bf828 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 14 Apr 2024 04:48:14 +0000 Subject: [PATCH 24/45] Replace legacy `conint` with `Annotated` field --- vllm/entrypoints/openai/protocol.py | 5 +++-- vllm/entrypoints/openai/serving_engine.py | 13 +++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index c06fc027d3c8..4358b6000d33 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -4,7 +4,8 @@ from typing import Dict, List, Literal, Optional, Union import torch -from pydantic import BaseModel, Field, conint, model_validator +from pydantic import BaseModel, Field, model_validator +from typing_extensions import Annotated from vllm.sampling_params import SamplingParams from vllm.utils import random_uuid @@ -229,7 +230,7 @@ class CompletionRequest(BaseModel): min_tokens: Optional[int] = 0 
skip_special_tokens: Optional[bool] = True spaces_between_special_tokens: Optional[bool] = True - truncate_prompt_tokens: Optional[conint(ge=1)] = None + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None # doc: end-completion-sampling-params # doc: begin-completion-extra-params diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 4f5258b048c4..3ebaa7315711 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -5,8 +5,9 @@ from typing import (Dict, Iterable, Iterator, List, Literal, Optional, Tuple, TypedDict, Union, cast) -from pydantic import conint +from pydantic import Field from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast +from typing_extensions import Annotated from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, @@ -187,7 +188,7 @@ def _normalize_prompt_text_to_input( request: Union[ChatCompletionRequest, CompletionRequest], prompt: str, tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], - truncate_prompt_tokens: Optional[conint(ge=1)] = None + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None ) -> Tuple[List[int], str]: if truncate_prompt_tokens is None: encoded = tokenizer(prompt) @@ -207,7 +208,7 @@ def _normalize_prompt_tokens_to_input( request: Union[ChatCompletionRequest, CompletionRequest], prompt_ids: List[int], tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], - truncate_prompt_tokens: Optional[conint(ge=1)] = None + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None ) -> Tuple[List[int], str]: if truncate_prompt_tokens is None: input_ids = prompt_ids @@ -244,7 +245,7 @@ def _tokenize_prompt_input( self, request: Union[ChatCompletionRequest, CompletionRequest], prompt_input: Union[str, List[int]], - truncate_prompt_tokens: Optional[conint(ge=1)] = None, + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None, ) -> Tuple[List[int], str]: """A simpler implementation of :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs` @@ -260,7 +261,7 @@ def _tokenize_prompt_inputs( self, request: Union[ChatCompletionRequest, CompletionRequest], prompt_inputs: Iterable[Union[str, List[int]]], - truncate_prompt_tokens: Optional[conint(ge=1)] = None, + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None, ) -> Iterator[Tuple[List[int], str]]: """A simpler implementation of :meth:`~vllm.entrypoints.openai.serving_engine.OpenAIServing._tokenize_prompt_input_or_inputs` @@ -321,7 +322,7 @@ def _tokenize_prompt_input_or_inputs( self, request: Union[ChatCompletionRequest, CompletionRequest], input_or_inputs: Union[str, List[str], List[int], List[List[int]]], - truncate_prompt_tokens: Optional[conint(ge=1)] = None, + truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None, ) -> Iterator[Tuple[List[int], str]]: """Tokenize/detokenize depending on the input format. 
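The `Annotated[int, Field(ge=1)]` form introduced above is the Pydantic v2 replacement for the
legacy `conint(ge=1)` constructor: it applies the same runtime validation while leaving the
annotation as a plain `int` for static type checkers. A minimal standalone sketch of the pattern
(the `ExampleRequest` model and its fields below are illustrative only and are not part of this
patch series):

from typing import Optional

from pydantic import BaseModel, Field, ValidationError
from typing_extensions import Annotated


class ExampleRequest(BaseModel):
    prompt: str
    # Same constraint as the legacy `conint(ge=1)`, expressed as metadata
    # on a plain `int`, so type checkers simply see `Optional[int]`.
    truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None


ExampleRequest(prompt="Hello", truncate_prompt_tokens=16)  # accepted

try:
    ExampleRequest(prompt="Hello", truncate_prompt_tokens=0)
except ValidationError as exc:
    print(exc)  # constraint violation: input should be >= 1

The helper signatures in `serving_engine.py` above use the same annotated form, keeping the
request field and the internal tokenization helpers consistent.
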
From cdbf08a2a4b2bba4424988e3cf8337282a71c75f Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 14 Apr 2024 07:07:34 +0000 Subject: [PATCH 25/45] Load image processor from HuggingFace - Note that multi modal processing logic has been moved from `LLM` to `LLMEngine` --- docs/source/models/vlm.rst | 10 ++++- examples/llava_example.py | 4 +- tests/conftest.py | 4 +- .../entrypoints/test_openai_server_vision.py | 2 + vllm/config.py | 3 ++ vllm/engine/arg_utils.py | 24 +++++++++- vllm/engine/llm_engine.py | 36 ++++++++++++++- vllm/entrypoints/llm.py | 20 ++++----- vllm/entrypoints/openai/serving_chat.py | 4 +- vllm/transformers_utils/image_processor.py | 44 +++++++++++++++++++ 10 files changed, 131 insertions(+), 20 deletions(-) create mode 100644 vllm/transformers_utils/image_processor.py diff --git a/docs/source/models/vlm.rst b/docs/source/models/vlm.rst index fcf0d1da3bbc..10bb94f9d1fb 100644 --- a/docs/source/models/vlm.rst +++ b/docs/source/models/vlm.rst @@ -28,6 +28,14 @@ Apart from the :ref:`basic engine arguments `, VLMs additionally re The image feature size along the context dimension. +.. option:: --image-processor + + Name or path of the huggingface image processor to use. + +.. option:: --image-processor-revision + + The specific image processor version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. + Offline Batched Inference ------------------------- @@ -57,7 +65,7 @@ For now, we only support a single image per text prompt when calling ``llm.gener image = ... outputs = llm.generate(prompt, - multi_modal_data=MultiModalData( + multi_modal_datas=MultiModalData( type=MultiModalData.Type.IMAGE, data=image)) for o in outputs: diff --git a/examples/llava_example.py b/examples/llava_example.py index 3d22b492654b..dda93c1b258a 100644 --- a/examples/llava_example.py +++ b/examples/llava_example.py @@ -26,7 +26,7 @@ def run_llava_pixel_values(): images = torch.load("images/stop_sign_pixel_values.pt") outputs = llm.generate(prompt, - multi_modal_data=MultiModalData( + multi_modal_datas=MultiModalData( type=MultiModalData.Type.IMAGE, data=images)) for o in outputs: generated_text = o.outputs[0].text @@ -49,7 +49,7 @@ def run_llava_image_features(): images = torch.load("images/stop_sign_image_features.pt") outputs = llm.generate(prompt, - multi_modal_data=MultiModalData( + multi_modal_datas=MultiModalData( type=MultiModalData.Type.IMAGE, data=images)) for o in outputs: generated_text = o.outputs[0].text diff --git a/tests/conftest.py b/tests/conftest.py index 5c50fc2d1bab..743f695f8608 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -323,8 +323,8 @@ def generate( req_outputs = self.model.generate( prompts, sampling_params=sampling_params, - multi_modal_data=MultiModalData(type=MultiModalData.Type.IMAGE, - data=images) + multi_modal_datas=MultiModalData(type=MultiModalData.Type.IMAGE, + data=images) if images is not None else None) outputs = [] for req_output in req_outputs: diff --git a/tests/entrypoints/test_openai_server_vision.py b/tests/entrypoints/test_openai_server_vision.py index f50537bd41a7..e533195f1d57 100644 --- a/tests/entrypoints/test_openai_server_vision.py +++ b/tests/entrypoints/test_openai_server_vision.py @@ -28,6 +28,8 @@ def server(): ray.init() server_runner = ServerRunner.remote([ + "--port", + "8001", "--model", MODEL_NAME, # use half precision for speed and memory savings in CI environment diff --git a/vllm/config.py b/vllm/config.py index 981d23442a8d..c2993fcfbb0c 100644 --- a/vllm/config.py 
+++ b/vllm/config.py @@ -915,6 +915,9 @@ class ImageOpenAI(enum.Enum): # worst case scenario (biggest supported resolution). image_input_shape: tuple image_feature_size: int + # The image processor to load from HuggingFace + image_processor: str + image_processor_revision: Optional[str] image_openai: ImageOpenAI = ImageOpenAI.SINGLE_IMAGE diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 80a7c7871e23..33d2007ce380 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -63,6 +63,8 @@ class EngineArgs: image_token_id: Optional[int] = None image_input_shape: Optional[str] = None image_feature_size: Optional[int] = None + image_processor: Optional[str] = None + image_processor_revision: Optional[str] = None image_openai: str = VisionLanguageConfig.ImageOpenAI.SINGLE_IMAGE.name scheduler_delay_factor: float = 0.0 @@ -76,6 +78,11 @@ def __post_init__(self): if self.tokenizer is None: self.tokenizer = self.model + if (self.image_processor is None + # Only attempt to load image processor if VLM config is given + and self.image_input_type is not None): + self.image_processor = self.model + @staticmethod def add_cli_args( parser: argparse.ArgumentParser) -> argparse.ArgumentParser: @@ -355,6 +362,7 @@ def add_cli_args( choices=["auto", "cuda", "neuron", "cpu"], help='Device type for vLLM execution.') # Related to Vision-language models such as llava + # (listed separately in docs/source/models/vlm.rst) parser.add_argument('--image-input-type', type=str, default=None, @@ -384,6 +392,19 @@ def add_cli_args( default=VisionLanguageConfig.ImageOpenAI.SINGLE_IMAGE.name.lower(), choices=[t.name.lower() for t in VisionLanguageConfig.ImageOpenAI], help=('Specifies how the model implements GPT-4 with Vision API.')) + parser.add_argument( + '--image-processor', + type=str, + default=EngineArgs.image_processor, + help='name or path of the huggingface image processor to use') + parser.add_argument( + '--image-processor-revision', + type=str, + default=None, + help='the specific image processor version to use. It can be a ' + 'branch name, a tag name, or a commit id. 
If unspecified, will use ' + 'the default version.') + parser.add_argument( '--scheduler-delay-factor', type=float, @@ -402,7 +423,6 @@ def add_cli_args( default=None, help= 'The name of the draft model to be used in speculative decoding.') - parser.add_argument( '--num-speculative-tokens', type=int, @@ -483,6 +503,8 @@ def create_engine_config(self, ) -> EngineConfig: image_token_id=self.image_token_id, image_input_shape=str_to_int_tuple(self.image_input_shape), image_feature_size=self.image_feature_size, + image_processor=self.image_processor, + image_processor_revision=self.image_processor_revision, image_openai=VisionLanguageConfig.get_image_openai_enum_type( self.image_openai), ) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index a91629a63059..711f041ff98c 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1,7 +1,7 @@ import time from typing import Iterable, List, Optional, Tuple, Type, Union -from transformers import PreTrainedTokenizer +from transformers import PreTrainedTokenizer, TensorType import vllm from vllm.config import (CacheConfig, DeviceConfig, LoRAConfig, ModelConfig, @@ -20,6 +20,7 @@ SequenceGroup, SequenceGroupOutput, SequenceOutput, SequenceStatus) from vllm.transformers_utils.detokenizer import Detokenizer +from vllm.transformers_utils.image_processor import get_image_processor from vllm.transformers_utils.tokenizer_group import (BaseTokenizerGroup, get_tokenizer_group) from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled, @@ -113,6 +114,7 @@ def __init__( self.log_stats = log_stats self._init_tokenizer() + self._init_image_processor() self.detokenizer = Detokenizer(self.tokenizer) self.seq_counter = Counter() @@ -261,6 +263,17 @@ def _init_tokenizer(self, **tokenizer_init_kwargs): self.tokenizer: BaseTokenizerGroup = get_tokenizer_group( self.parallel_config.tokenizer_pool_config, **init_kwargs) + def _init_image_processor(self, **processor_init_kwargs): + if self.vision_language_config is None: + self.image_processor = None + else: + self.image_processor = get_image_processor( + self.vision_language_config.image_processor, + trust_remote_code=self.model_config.trust_remote_code, + revision=self.vision_language_config.image_processor_revision, + **processor_init_kwargs, + ) + def _verify_args(self) -> None: self.model_config.verify_with_parallel_config(self.parallel_config) self.cache_config.verify_with_parallel_config(self.parallel_config) @@ -283,6 +296,23 @@ def encode_request( lora_request=lora_request) return prompt_token_ids + def process_multi_modal_data(self, data: MultiModalData) -> MultiModalData: + if data.type == MultiModalData.Type.IMAGE: + image_processor = self.image_processor + if image_processor is None: + return data + + out_dict = image_processor.preprocess(data.data) \ + .convert_to_tensors(TensorType.PYTORCH) + + return MultiModalData( + type=data.type, + data=out_dict["pixel_values"], + ) + else: + msg = f"Unknown data type: {data.type}" + raise NotImplementedError(msg) + def add_request( self, request_id: str, @@ -367,6 +397,10 @@ def add_request( # processing sampling_params.eos_token_id = seq.eos_token_id + # Process multi-modal data + if multi_modal_data is not None: + multi_modal_data = self.process_multi_modal_data(multi_modal_data) + # Create the sequence group. 
seq_group = SequenceGroup(request_id, [seq], sampling_params, arrival_time, lora_request, multi_modal_data) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 9e08c253dc53..6cb2501368a6 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1,6 +1,5 @@ from typing import List, Optional, Union -import torch from tqdm import tqdm from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast @@ -130,7 +129,8 @@ def generate( prompt_token_ids: Optional[List[List[int]]] = None, use_tqdm: bool = True, lora_request: Optional[LoRARequest] = None, - multi_modal_data: Optional[MultiModalData] = None, + multi_modal_datas: Optional[Union[ + Optional[MultiModalData], List[Optional[MultiModalData]]]] = None, ) -> List[RequestOutput]: """Generates the completions for the input prompts. @@ -146,7 +146,7 @@ def generate( use the tokenizer to convert the prompts to token IDs. use_tqdm: Whether to use tqdm to display the progress bar. lora_request: LoRA request to use for generation, if any. - multi_modal_data: Multi modal data. + multi_modal_datas: A list of multi modal data, one per prompt. Returns: A list of `RequestOutput` objects containing the generated @@ -165,9 +165,9 @@ def generate( if sampling_params is None: # Use default sampling params. sampling_params = SamplingParams() - - if multi_modal_data: - multi_modal_data.data = multi_modal_data.data.to(torch.float16) + if isinstance(multi_modal_datas, MultiModalData): + # Convert a single multi_modal_data to a list. + multi_modal_datas = [multi_modal_datas] # Add requests to the engine. if prompts is not None: @@ -180,16 +180,14 @@ def generate( prompt = prompts[i] if prompts is not None else None token_ids = None if prompt_token_ids is None else prompt_token_ids[ i] + multi_modal_data = multi_modal_datas[ + i] if multi_modal_datas is not None else None self._add_request( prompt, sampling_params, token_ids, lora_request=lora_request, - # Get ith image while maintaining the batch dim. 
- multi_modal_data=MultiModalData( - type=multi_modal_data.type, - data=multi_modal_data.data[i].unsqueeze(0)) - if multi_modal_data else None, + multi_modal_data=multi_modal_data, ) return self._run_engine(use_tqdm) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 4c1a064ce3a3..75ea4a58631f 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -52,10 +52,10 @@ async def get_and_parse_image(image_url: str, image = image.convert(image_format).resize((height, width)) image_arr = np.array(image, copy=True) + # Passed to the image processor which is loaded from HuggingFace image_tensor = torch.as_tensor(image_arr) \ .view(batch_size, height, width, num_channels) \ - .permute((0, 3, 1, 2)) \ - .to(torch.float16) + .permute((0, 3, 1, 2)) # NCHW return MultiModalData(type=MultiModalData.Type.IMAGE, data=image_tensor) diff --git a/vllm/transformers_utils/image_processor.py b/vllm/transformers_utils/image_processor.py new file mode 100644 index 000000000000..74735587645e --- /dev/null +++ b/vllm/transformers_utils/image_processor.py @@ -0,0 +1,44 @@ +from typing import Optional + +from transformers import AutoImageProcessor +from transformers.image_processing_utils import BaseImageProcessor + +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +def get_image_processor( + processor_name: str, + *args, + trust_remote_code: bool = False, + revision: Optional[str] = None, + **kwargs, +) -> BaseImageProcessor: + """Gets an image processor for the given model name via HuggingFace.""" + if revision is None: + revision = "main" + + try: + processor: BaseImageProcessor = AutoImageProcessor.from_pretrained( + processor_name, + *args, + trust_remote_code=trust_remote_code, + revision=revision, + **kwargs) + except ValueError as e: + # If the error pertains to the processor class not existing or not + # currently being imported, suggest using the --trust-remote-code flag. + # Unlike AutoTokenizer, AutoImageProcessor does not separate such errors + if not trust_remote_code: + err_msg = ( + "Failed to load the image processor. If the image processor is " + "a custom processor not yet available in the HuggingFace " + "transformers library, consider setting " + "`trust_remote_code=True` in LLM or using the " + "`--trust-remote-code` flag in the CLI.") + raise RuntimeError(err_msg) from e + else: + raise e + + return processor From 5722dd8f2c3106feaf63a7ba4b8e5dc992362162 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Sun, 14 Apr 2024 08:01:32 +0000 Subject: [PATCH 26/45] Allow disabling image processor - Also fix missing arguments to config in `test_llava.py` --- docs/source/models/vlm.rst | 4 ++++ tests/models/test_llava.py | 8 ++++++-- vllm/config.py | 2 +- vllm/engine/arg_utils.py | 22 +++++++++++++++++----- vllm/engine/llm_engine.py | 8 +++++--- 5 files changed, 33 insertions(+), 11 deletions(-) diff --git a/docs/source/models/vlm.rst b/docs/source/models/vlm.rst index 10bb94f9d1fb..c4a27d2b6719 100644 --- a/docs/source/models/vlm.rst +++ b/docs/source/models/vlm.rst @@ -36,6 +36,10 @@ Apart from the :ref:`basic engine arguments `, VLMs additionally re The specific image processor version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version. +.. option:: --no-image-processor + + Disables the use of image processor, even if one is defined for the model on huggingface. 
+ Offline Batched Inference ------------------------- diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index f86cd3fa88f5..72aeef91fbad 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -15,13 +15,17 @@ image_input_type=VisionLanguageConfig.ImageInputType.PIXEL_VALUES, image_feature_size=576, image_token_id=32000, - image_input_shape=(1, 3, 336, 336))), + image_input_shape=(1, 3, 336, 336), + image_processor=None, + image_processor_revision=None)), ("llava-hf/llava-1.5-7b-hf", VisionLanguageConfig( image_input_type=VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, image_feature_size=576, image_token_id=32000, - image_input_shape=(1, 576, 1024))) + image_input_shape=(1, 576, 1024), + image_processor=None, + image_processor_revision=None)) ] diff --git a/vllm/config.py b/vllm/config.py index 971f69e4bb48..d92f061e1b69 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -921,7 +921,7 @@ class ImageOpenAI(enum.Enum): image_input_shape: tuple image_feature_size: int # The image processor to load from HuggingFace - image_processor: str + image_processor: Optional[str] image_processor_revision: Optional[str] image_openai: ImageOpenAI = ImageOpenAI.SINGLE_IMAGE diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 922de15c60e8..72b839d79bcd 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -79,6 +79,7 @@ class EngineArgs: image_feature_size: Optional[int] = None image_processor: Optional[str] = None image_processor_revision: Optional[str] = None + no_image_processor: bool = False image_openai: str = VisionLanguageConfig.ImageOpenAI.SINGLE_IMAGE.name scheduler_delay_factor: float = 0.0 @@ -92,11 +93,6 @@ def __post_init__(self): if self.tokenizer is None: self.tokenizer = self.model - if (self.image_processor is None - # Only attempt to load image processor if VLM config is given - and self.image_input_type is not None): - self.image_processor = self.model - @staticmethod def add_cli_args( parser: argparse.ArgumentParser) -> argparse.ArgumentParser: @@ -423,6 +419,11 @@ def add_cli_args( help='the specific image processor version to use. It can be a ' 'branch name, a tag name, or a commit id. If unspecified, will use ' 'the default version.') + parser.add_argument( + '--no-image-processor', + action='store_true', + help='Disables the use of image processor, even if one is defined ' + 'for the model on huggingface.') parser.add_argument( '--scheduler-delay-factor', @@ -528,6 +529,17 @@ def create_engine_config(self, ) -> EngineConfig: raise ValueError( 'Specify `image_token_id`, `image_input_shape` and ' '`image_feature_size` together with `image_input_type`.') + + if self.image_processor is None: + self.image_processor = self.model + if self.no_image_processor: + if self.image_processor != self.model: + raise ValueError( + 'Do not specify `image_processor` when it is disabled ' + 'by `--no-image-processor`.') + + self.image_processor = None + vision_language_config = VisionLanguageConfig( image_input_type=VisionLanguageConfig. 
get_image_input_enum_type(self.image_input_type), diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 4116356cddae..fc460ab32a3c 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -267,13 +267,15 @@ def _init_tokenizer(self, **tokenizer_init_kwargs): self.parallel_config.tokenizer_pool_config, **init_kwargs) def _init_image_processor(self, **processor_init_kwargs): - if self.vision_language_config is None: + vlm_config = self.vision_language_config + + if vlm_config is None or vlm_config.image_processor is None: self.image_processor = None else: self.image_processor = get_image_processor( - self.vision_language_config.image_processor, + vlm_config.image_processor, trust_remote_code=self.model_config.trust_remote_code, - revision=self.vision_language_config.image_processor_revision, + revision=vlm_config.image_processor_revision, **processor_init_kwargs, ) From 6e1fa6719bf206e56b99b9c2386fa94f5badcd79 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Mon, 15 Apr 2024 02:57:06 +0000 Subject: [PATCH 27/45] Fix errors when running the example and tests --- examples/llava_example.py | 2 ++ tests/conftest.py | 28 ++++++++++--------- .../entrypoints/test_openai_server_vision.py | 2 -- tests/models/test_llava.py | 4 +++ vllm/engine/llm_engine.py | 9 ++++-- vllm/entrypoints/llm.py | 18 ++++++++---- 6 files changed, 41 insertions(+), 22 deletions(-) diff --git a/examples/llava_example.py b/examples/llava_example.py index dda93c1b258a..43ae9458dab7 100644 --- a/examples/llava_example.py +++ b/examples/llava_example.py @@ -17,6 +17,7 @@ def run_llava_pixel_values(): image_token_id=32000, image_input_shape="1,3,336,336", image_feature_size=576, + no_image_processor=True, ) prompt = "" * 576 + ( @@ -40,6 +41,7 @@ def run_llava_image_features(): image_token_id=32000, image_input_shape="1,576,1024", image_feature_size=576, + no_image_processor=True, ) prompt = "" * 576 + ( diff --git a/tests/conftest.py b/tests/conftest.py index 743f695f8608..eb506dbdecf0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -85,17 +85,15 @@ def hf_images() -> List[Image.Image]: @pytest.fixture() -def vllm_images(request) -> "torch.Tensor": +def vllm_images(request) -> List[torch.Tensor]: vision_language_config = request.getfixturevalue("model_and_config")[1] - all_images = [] if vision_language_config.image_input_type == ( VisionLanguageConfig.ImageInputType.IMAGE_FEATURES): filenames = _IMAGE_FEATURES_FILES else: filenames = _PIXEL_VALUES_FILES - for filename in filenames: - all_images.append(torch.load(filename)) - return torch.concat(all_images, dim=0) + + return [torch.load(filename) for filename in filenames] @pytest.fixture() @@ -172,9 +170,10 @@ def generate( images: Optional[List[Image.Image]] = None, **kwargs, ) -> List[Tuple[List[int], str]]: - outputs: List[Tuple[List[int], str]] = [] if images: assert len(prompts) == len(images) + + outputs: List[Tuple[List[int], str]] = [] for i, prompt in enumerate(prompts): if self.model_name not in _VISION_LANGUAGE_MODELS: input_ids = self.tokenizer(prompt, @@ -207,7 +206,7 @@ def generate_greedy( self, prompts: List[str], max_tokens: int, - images: Optional["torch.Tensor"] = None, + images: Optional[List[Image.Image]] = None, ) -> List[Tuple[List[int], str]]: outputs = self.generate(prompts, do_sample=False, @@ -316,16 +315,19 @@ def generate( self, prompts: List[str], sampling_params: SamplingParams, - images: Optional["torch.Tensor"] = None, + images: Optional[List[torch.Tensor]] = None, ) -> List[Tuple[List[int], str]]: 
if images is not None: - assert len(prompts) == images.shape[0] + assert len(prompts) == len(images) + req_outputs = self.model.generate( prompts, sampling_params=sampling_params, - multi_modal_datas=MultiModalData(type=MultiModalData.Type.IMAGE, - data=images) - if images is not None else None) + multi_modal_datas=[ + MultiModalData(type=MultiModalData.Type.IMAGE, data=image) + for image in images + ] if images is not None else None) + outputs = [] for req_output in req_outputs: prompt_str = req_output.prompt @@ -362,7 +364,7 @@ def generate_greedy( self, prompts: List[str], max_tokens: int, - images: Optional[torch.Tensor] = None, + images: Optional[List[torch.Tensor]] = None, ) -> List[Tuple[List[int], str]]: greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens) outputs = self.generate(prompts, greedy_params, images=images) diff --git a/tests/entrypoints/test_openai_server_vision.py b/tests/entrypoints/test_openai_server_vision.py index e533195f1d57..f50537bd41a7 100644 --- a/tests/entrypoints/test_openai_server_vision.py +++ b/tests/entrypoints/test_openai_server_vision.py @@ -28,8 +28,6 @@ def server(): ray.init() server_runner = ServerRunner.remote([ - "--port", - "8001", "--model", MODEL_NAME, # use half precision for speed and memory savings in CI environment diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index 72aeef91fbad..a8f33d1bd22c 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -43,6 +43,10 @@ def as_dict(vision_language_config: VisionLanguageConfig) -> Dict: result[field.name] = ",".join([str(item) for item in value]) else: result[field.name] = value + + result[ + "no_image_processor"] = vision_language_config.image_processor is None + return result diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index fc460ab32a3c..452b0dd604e8 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -310,8 +310,13 @@ def process_multi_modal_data(self, data: MultiModalData) -> MultiModalData: if image_processor is None: return data - out_dict = image_processor.preprocess(data.data) \ - .convert_to_tensors(TensorType.PYTORCH) + try: + out_dict = image_processor.preprocess(data.data) \ + .convert_to_tensors(TensorType.PYTORCH) + except Exception: + logger.error("Failed to process image with shape %s", + data.data.shape) + raise return MultiModalData( type=data.type, diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 6cb2501368a6..be0a58c91dd8 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -160,14 +160,13 @@ def generate( prompts = [prompts] if (prompts is not None and prompt_token_ids is not None and len(prompts) != len(prompt_token_ids)): - raise ValueError("The lengths of prompts and prompt_token_ids " - "must be the same.") + raise ValueError( + f"The lengths of prompts ({len(prompts)}) and " + f"prompt_token_ids ({len(prompt_token_ids)}) must be the same." + ) if sampling_params is None: # Use default sampling params. sampling_params = SamplingParams() - if isinstance(multi_modal_datas, MultiModalData): - # Convert a single multi_modal_data to a list. - multi_modal_datas = [multi_modal_datas] # Add requests to the engine. if prompts is not None: @@ -176,6 +175,15 @@ def generate( assert prompt_token_ids is not None num_requests = len(prompt_token_ids) + if isinstance(multi_modal_datas, MultiModalData): + # Convert a single multi_modal_data to a list. 
+ multi_modal_datas = [multi_modal_datas] + if (multi_modal_datas is not None + and len(multi_modal_datas) != num_requests): + raise ValueError(f"The lengths of prompts/prompt_token_ids " + f"({num_requests}) and multi_modal_datas " + f"({len(multi_modal_datas)}) must be the same.") + for i in range(num_requests): prompt = prompts[i] if prompts is not None else None token_ids = None if prompt_token_ids is None else prompt_token_ids[ From 21434df5e1a29281ee7b0ed241dca197e19f6d80 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 16 Apr 2024 06:40:21 +0000 Subject: [PATCH 28/45] Add test for loading image processor by revision --- tests/tokenization/test_image_processor.py | 19 +++++++++++++++++++ vllm/transformers_utils/image_processor.py | 3 --- 2 files changed, 19 insertions(+), 3 deletions(-) create mode 100644 tests/tokenization/test_image_processor.py diff --git a/tests/tokenization/test_image_processor.py b/tests/tokenization/test_image_processor.py new file mode 100644 index 000000000000..d7989bd623e2 --- /dev/null +++ b/tests/tokenization/test_image_processor.py @@ -0,0 +1,19 @@ +import pytest +from transformers.image_processing_utils import BaseImageProcessor + +from vllm.transformers_utils.image_processor import get_image_processor + +IMAGE_PROCESSOR_NAMES = [ + "llava-hf/llava-1.5-7b-hf", +] + + +@pytest.mark.parametrize("processor_name", IMAGE_PROCESSOR_NAMES) +def test_image_processor_revision(processor_name: str): + # Assume that "main" branch always exists + image_processor = get_image_processor(processor_name, revision="main") + assert isinstance(image_processor, BaseImageProcessor) + + # Assume that "never" branch always does not exist + with pytest.raises(OSError, match='not a valid git identifier'): + get_image_processor(processor_name, revision="never") diff --git a/vllm/transformers_utils/image_processor.py b/vllm/transformers_utils/image_processor.py index 74735587645e..2bb5215d4846 100644 --- a/vllm/transformers_utils/image_processor.py +++ b/vllm/transformers_utils/image_processor.py @@ -16,9 +16,6 @@ def get_image_processor( **kwargs, ) -> BaseImageProcessor: """Gets an image processor for the given model name via HuggingFace.""" - if revision is None: - revision = "main" - try: processor: BaseImageProcessor = AutoImageProcessor.from_pretrained( processor_name, From a5907b028b17e3c078d47a0627bf58c4a5ce6243 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Tue, 16 Apr 2024 12:20:27 +0000 Subject: [PATCH 29/45] Temporary patch for llava-1.5-13b to facilitate testing --- tests/conftest.py | 2 ++ tests/models/test_llava.py | 34 ++++++++++++++------------ vllm/model_executor/models/__init__.py | 1 + 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index eb506dbdecf0..7bc99bc0858a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -129,6 +129,8 @@ def example_long_prompts() -> List[str]: _VISION_LANGUAGE_MODELS = { "llava-hf/llava-1.5-7b-hf": LlavaForConditionalGeneration, + "llava-hf/llava-1.5-13b-hf": LlavaForConditionalGeneration, + "llava-hf/bakLlava-v1-hf": LlavaForConditionalGeneration, } diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index a8f33d1bd22c..84e920cee306 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -9,23 +9,25 @@ from vllm.config import VisionLanguageConfig + +def iter_llava_configs(model_name: str): + for input_type, input_shape in [ + (VisionLanguageConfig.ImageInputType.PIXEL_VALUES, (1, 3, 336, 336)), + 
(VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, (1, 576, 1024)), + ]: + yield (model_name, + VisionLanguageConfig(image_input_type=input_type, + image_feature_size=576, + image_token_id=32000, + image_input_shape=input_shape, + image_processor=None, + image_processor_revision=None)) + + model_and_vl_config = [ - ("llava-hf/llava-1.5-7b-hf", - VisionLanguageConfig( - image_input_type=VisionLanguageConfig.ImageInputType.PIXEL_VALUES, - image_feature_size=576, - image_token_id=32000, - image_input_shape=(1, 3, 336, 336), - image_processor=None, - image_processor_revision=None)), - ("llava-hf/llava-1.5-7b-hf", - VisionLanguageConfig( - image_input_type=VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, - image_feature_size=576, - image_token_id=32000, - image_input_shape=(1, 576, 1024), - image_processor=None, - image_processor_revision=None)) + *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), + *iter_llava_configs("llava-hf/llava-1.5-13b-hf"), + *iter_llava_configs("llava-hf/bakLlava-v1-hf"), ] diff --git a/vllm/model_executor/models/__init__.py b/vllm/model_executor/models/__init__.py index 17fc97056804..ff45838f34e7 100755 --- a/vllm/model_executor/models/__init__.py +++ b/vllm/model_executor/models/__init__.py @@ -33,6 +33,7 @@ "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), "LlavaForConditionalGeneration": ("llava", "LlavaForConditionalGeneration"), + "LlavaForCausalLM": ("llava", "LlavaForConditionalGeneration"), # For decapoda-research/llama-* "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"), "MistralForCausalLM": ("llama", "LlamaForCausalLM"), From c126646deb6ead2675df065e43ead2713b06a101 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Wed, 17 Apr 2024 08:28:25 +0000 Subject: [PATCH 30/45] Fix issue with pickling config when serving LLaVA with multiple GPUs --- vllm/config.py | 21 +++++++++++++++++---- vllm/entrypoints/openai/serving_chat.py | 2 +- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 6a16701b29af..9d78f63631e3 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2,7 +2,8 @@ import json import os from dataclasses import dataclass, field, fields -from typing import TYPE_CHECKING, ClassVar, List, Optional, Protocol, Union +from typing import (TYPE_CHECKING, ClassVar, Dict, List, Optional, Protocol, + Union) import torch from packaging.version import Version @@ -903,9 +904,9 @@ class ImageOpenAI(enum.Enum): """Specifies how the model implements `OpenAI's GPT-4 with Vision API `_. """ - UNSUPPORTED = OpenAIVisionAdapterForNoImage() - SINGLE_IMAGE = OpenAIVisionAdapterForSingleImage() - MULTI_IMAGE = OpenAIVisionAdapterForMultiImage() + UNSUPPORTED = enum.auto() + SINGLE_IMAGE = enum.auto() + MULTI_IMAGE = enum.auto() image_input_type: ImageInputType # The input id corresponding to image token. 
@@ -920,6 +921,12 @@ class ImageOpenAI(enum.Enum): image_processor_revision: Optional[str] image_openai: ImageOpenAI = ImageOpenAI.SINGLE_IMAGE + _image_openai_processors: ClassVar[Dict[ + ImageOpenAI, OpenAIVisionAdapter]] = { + ImageOpenAI.UNSUPPORTED: OpenAIVisionAdapterForNoImage(), + ImageOpenAI.SINGLE_IMAGE: OpenAIVisionAdapterForSingleImage(), + ImageOpenAI.MULTI_IMAGE: OpenAIVisionAdapterForMultiImage(), + } @classmethod def get_image_input_enum_type(cls, value: str) -> ImageInputType: @@ -941,6 +948,12 @@ def get_image_openai_enum_type(cls, value: str) -> ImageOpenAI: f"Expecting to choose from " f"{[x.name for x in cls.ImageOpenAI]}.") from e + def get_image_token_text(self, config: "VisionLanguageConfig", + tokenizer: PreTrainedTokenizerBase, + image_idx: int) -> str: + return self._image_openai_processors[self.image_openai] \ + .get_image_token_text(config, tokenizer, image_idx) + _STR_DTYPE_TO_TORCH_DTYPE = { "half": torch.float16, diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 5723690fca57..1911dee1fc9a 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -107,7 +107,7 @@ def _parse_chat_message_image_input( if image_url.get("detail", "auto") != "auto": logger.info("content[%s].image_url.detail is ignored", i) - text = config.image_openai.value.get_image_token_text( + text = config.get_image_token_text( config, tokenizer, image_idx=len(image_futures)) image_future = get_and_parse_image(image_url["url"], config) From 11e9921337170db8a106e9881310fdb7b7fcf154 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 01:47:28 +0000 Subject: [PATCH 31/45] Add TODO to test --- tests/models/test_llava.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index 84e920cee306..c4536e9bc9b1 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -115,3 +115,7 @@ def test_models(hf_runner, vllm_runner, hf_image_prompts, hf_images, f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") assert hf_output_ids == vllm_output_ids, ( f"Test{i}:\nHF: {hf_output_ids}\nvLLM: {vllm_output_ids}") + + +# TODO: Add test for `tensor_parallel_size` [ref: PR #3883] +# (Requires multiple GPUs) From 7ae80a2249c50f2d38e5eb0d66c5e4296a32714a Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 02:29:37 +0000 Subject: [PATCH 32/45] Try to avoid OOM by using `--enforce-eager` --- tests/models/test_llava.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index c4536e9bc9b1..933a8e7201b8 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -98,6 +98,7 @@ def test_models(hf_runner, vllm_runner, hf_image_prompts, hf_images, vllm_model = vllm_runner(model_id, dtype=dtype, worker_use_ray=worker_use_ray, + enforce_eager=True, **as_dict(vision_language_config)) vllm_outputs = vllm_model.generate_greedy(vllm_image_prompts, max_tokens, From 2610bea36d2b493c438e36e35fa1b2ec5e62d876 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 03:23:13 +0000 Subject: [PATCH 33/45] Reduce number of models to test to avoid OOM --- tests/models/test_llava.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index 933a8e7201b8..cee588617fa4 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -27,7 +27,6 @@ def iter_llava_configs(model_name: str): 
model_and_vl_config = [ *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), *iter_llava_configs("llava-hf/llava-1.5-13b-hf"), - *iter_llava_configs("llava-hf/bakLlava-v1-hf"), ] From 5ad2b679337c1b0fe2103610d2ec85c93a052f1a Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 03:44:23 +0000 Subject: [PATCH 34/45] Try testing 13b model only --- tests/models/test_llava.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index cee588617fa4..7208f8236f70 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -25,7 +25,7 @@ def iter_llava_configs(model_name: str): model_and_vl_config = [ - *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), + # *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), *iter_llava_configs("llava-hf/llava-1.5-13b-hf"), ] From 696357b6e99bc99043f5a57572d41236b3e592de Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 04:48:43 +0000 Subject: [PATCH 35/45] Refactor image processing, `MultiModalData` and LLaVA model - Remove channel conversion and resizing from OpenAI server preprocessing since the image processor in HuggingFace should be able to handle that - `MultiModalData` is now an abstract class that outputs additional kwargs to be input into the model. This was intially done to support LLaVA-NeXT's `image_size` parameter but can be extended to other models as well. - The application of image processor is now defined inside `MultiModalData` so that there is no need to extensively edit the engine to support other types of data - New `MultiModalData` subclasses: `ImagePixelData` and `ImageFeatureData` to better differentiate the two cases of image input - Refactored LLaVA-1.5 model to make it easier to inherit for defining LLaVA-NeXT model --- docs/source/models/vlm.rst | 6 +- examples/llava_example.py | 14 +- tests/conftest.py | 26 +-- tests/models/test_llava.py | 8 +- vllm/core/scheduler.py | 6 +- vllm/engine/llm_engine.py | 58 ++----- vllm/entrypoints/openai/serving_chat.py | 55 ++----- vllm/model_executor/model_loader/loader.py | 7 +- vllm/model_executor/models/llava.py | 174 ++++++++++++++++----- vllm/sequence.py | 85 +++++++--- vllm/transformers_utils/image_processor.py | 4 + vllm/worker/model_runner.py | 99 +++++++----- 12 files changed, 324 insertions(+), 218 deletions(-) diff --git a/docs/source/models/vlm.rst b/docs/source/models/vlm.rst index c4a27d2b6719..a33426a0d17a 100644 --- a/docs/source/models/vlm.rst +++ b/docs/source/models/vlm.rst @@ -58,7 +58,7 @@ To initialize a VLM, the aforementioned arguments must be passed to the ``LLM`` For now, we only support a single image per text prompt when calling ``llm.generate``. To pass an image to the model, note the following parameters: * ``prompt``: The prompt should have a number of ```` tokens equal to ``image_feature_size``. -* ``multi_modal_data``: This should be an instance of ``MultiModalData`` with type ``MultiModalData.Type.IMAGE`` and its data set to a single image tensor with the shape ``image_input_shape``. +* ``multi_modal_datas``: This should be an instance of ``ImagePixelData``. .. code-block:: python @@ -68,9 +68,7 @@ For now, we only support a single image per text prompt when calling ``llm.gener # Load the image and reshape to (1, 3, 336, 336) image = ... 
- outputs = llm.generate(prompt, - multi_modal_datas=MultiModalData( - type=MultiModalData.Type.IMAGE, data=image)) + outputs = llm.generate(prompt, multi_modal_datas=ImagePixelData(image)) for o in outputs: generated_text = o.outputs[0].text diff --git a/examples/llava_example.py b/examples/llava_example.py index 43ae9458dab7..1d66b7a673a8 100644 --- a/examples/llava_example.py +++ b/examples/llava_example.py @@ -5,7 +5,7 @@ import torch from vllm import LLM -from vllm.sequence import MultiModalData +from vllm.sequence import ImageFeatureData, ImagePixelData # The assets are located at `s3://air-example-data-2/vllm_opensource_llava/`. @@ -24,11 +24,9 @@ def run_llava_pixel_values(): "\nUSER: What is the content of this image?\nASSISTANT:") # This should be provided by another online or offline component. - images = torch.load("images/stop_sign_pixel_values.pt") + image = torch.load("images/stop_sign_pixel_values.pt") - outputs = llm.generate(prompt, - multi_modal_datas=MultiModalData( - type=MultiModalData.Type.IMAGE, data=images)) + outputs = llm.generate(prompt, multi_modal_datas=ImagePixelData(image)) for o in outputs: generated_text = o.outputs[0].text print(generated_text) @@ -48,11 +46,9 @@ def run_llava_image_features(): "\nUSER: What is the content of this image?\nASSISTANT:") # This should be provided by another online or offline component. - images = torch.load("images/stop_sign_image_features.pt") + image = torch.load("images/stop_sign_image_features.pt") - outputs = llm.generate(prompt, - multi_modal_datas=MultiModalData( - type=MultiModalData.Type.IMAGE, data=images)) + outputs = llm.generate(prompt, multi_modal_datas=ImageFeatureData(image)) for o in outputs: generated_text = o.outputs[0].text print(generated_text) diff --git a/tests/conftest.py b/tests/conftest.py index 7bc99bc0858a..b40225f0af74 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -172,7 +172,7 @@ def generate( images: Optional[List[Image.Image]] = None, **kwargs, ) -> List[Tuple[List[int], str]]: - if images: + if images is not None: assert len(prompts) == len(images) outputs: List[Tuple[List[int], str]] = [] @@ -182,6 +182,7 @@ def generate( return_tensors="pt").input_ids inputs = {"input_ids": input_ids.cuda()} else: + assert self.processor is not None image = images[i] if images else None inputs = self.processor(text=prompt, images=image, @@ -190,6 +191,7 @@ def generate( key: value.cuda() if value is not None else None for key, value in inputs.items() } + output_ids = self.model.generate( **inputs, use_cache=True, @@ -317,18 +319,14 @@ def generate( self, prompts: List[str], sampling_params: SamplingParams, - images: Optional[List[torch.Tensor]] = None, + multi_modal_datas: Optional[List[Optional[MultiModalData]]] = None, ) -> List[Tuple[List[int], str]]: - if images is not None: - assert len(prompts) == len(images) + if multi_modal_datas is not None: + assert len(prompts) == len(multi_modal_datas) - req_outputs = self.model.generate( - prompts, - sampling_params=sampling_params, - multi_modal_datas=[ - MultiModalData(type=MultiModalData.Type.IMAGE, data=image) - for image in images - ] if images is not None else None) + req_outputs = self.model.generate(prompts, + sampling_params=sampling_params, + multi_modal_datas=multi_modal_datas) outputs = [] for req_output in req_outputs: @@ -366,10 +364,12 @@ def generate_greedy( self, prompts: List[str], max_tokens: int, - images: Optional[List[torch.Tensor]] = None, + multi_modal_datas: Optional[List[Optional[MultiModalData]]] = None, ) -> 
List[Tuple[List[int], str]]: greedy_params = SamplingParams(temperature=0.0, max_tokens=max_tokens) - outputs = self.generate(prompts, greedy_params, images=images) + outputs = self.generate(prompts, + greedy_params, + multi_modal_datas=multi_modal_datas) return [(output_ids[0], output_str[0]) for output_ids, output_str in outputs] diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index 7208f8236f70..db0159a02b42 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -8,6 +8,7 @@ from transformers import AutoTokenizer from vllm.config import VisionLanguageConfig +from vllm.sequence import ImagePixelData def iter_llava_configs(model_name: str): @@ -99,9 +100,10 @@ def test_models(hf_runner, vllm_runner, hf_image_prompts, hf_images, worker_use_ray=worker_use_ray, enforce_eager=True, **as_dict(vision_language_config)) - vllm_outputs = vllm_model.generate_greedy(vllm_image_prompts, - max_tokens, - images=vllm_images) + vllm_outputs = vllm_model.generate_greedy( + vllm_image_prompts, + max_tokens, + multi_modal_datas=[ImagePixelData(image) for image in vllm_images]) del vllm_model gc.collect() diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py index 419855062103..5314bdb969d0 100644 --- a/vllm/core/scheduler.py +++ b/vllm/core/scheduler.py @@ -922,11 +922,11 @@ def schedule(self) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs]: lora_request=seq_group.lora_request, computed_block_nums=common_computed_block_nums, state=seq_group.state, - # `multi_modal_data` will only be present for the 1st comm + # `multi_modal_kwargs` will only be present for the 1st comm # between engine and worker. # the subsequent comms can still use delta, but - # `multi_modal_data` will be None. - multi_modal_data=seq_group.multi_modal_data + # `multi_modal_kwargs` will be None. 
+ multi_modal_kwargs=seq_group.multi_modal_kwargs if scheduler_outputs.num_prefill_groups > 0 else None, ) seq_group_metadata_list.append(seq_group_metadata) diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 644bef425efe..e8bd34c48b3f 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1,7 +1,7 @@ import time from typing import Iterable, List, Optional, Type, Union -from transformers import PreTrainedTokenizer, TensorType +from transformers import PreTrainedTokenizer import vllm from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, LoadConfig, @@ -24,7 +24,6 @@ from vllm.sequence import (MultiModalData, SamplerOutput, Sequence, SequenceGroup) from vllm.transformers_utils.detokenizer import Detokenizer -from vllm.transformers_utils.image_processor import get_image_processor from vllm.transformers_utils.tokenizer_group import (BaseTokenizerGroup, get_tokenizer_group) from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled, @@ -123,7 +122,6 @@ def __init__( self.log_stats = log_stats self._init_tokenizer() - self._init_image_processor() self.detokenizer = Detokenizer(self.tokenizer) self.seq_counter = Counter() @@ -288,19 +286,6 @@ def _init_tokenizer(self, **tokenizer_init_kwargs): self.tokenizer: BaseTokenizerGroup = get_tokenizer_group( self.parallel_config.tokenizer_pool_config, **init_kwargs) - def _init_image_processor(self, **processor_init_kwargs): - vlm_config = self.vision_language_config - - if vlm_config is None or vlm_config.image_processor is None: - self.image_processor = None - else: - self.image_processor = get_image_processor( - vlm_config.image_processor, - trust_remote_code=self.model_config.trust_remote_code, - revision=vlm_config.image_processor_revision, - **processor_init_kwargs, - ) - def _verify_args(self) -> None: self.model_config.verify_with_parallel_config(self.parallel_config) self.cache_config.verify_with_parallel_config(self.parallel_config) @@ -323,28 +308,6 @@ def encode_request( lora_request=lora_request) return prompt_token_ids - def process_multi_modal_data(self, data: MultiModalData) -> MultiModalData: - if data.type == MultiModalData.Type.IMAGE: - image_processor = self.image_processor - if image_processor is None: - return data - - try: - out_dict = image_processor.preprocess(data.data) \ - .convert_to_tensors(TensorType.PYTORCH) - except Exception: - logger.error("Failed to process image with shape %s", - data.data.shape) - raise - - return MultiModalData( - type=data.type, - data=out_dict["pixel_values"], - ) - else: - msg = f"Unknown data type: {data.type}" - raise NotImplementedError(msg) - def add_request( self, request_id: str, @@ -399,7 +362,10 @@ def add_request( if lora_request is not None and not self.lora_config: raise ValueError(f"Got lora_request {lora_request} but LoRA is " "not enabled!") - max_logprobs = self.get_model_config().max_logprobs + + model_config = self.get_model_config() + + max_logprobs = model_config.max_logprobs if (sampling_params.logprobs and sampling_params.logprobs > max_logprobs) or ( sampling_params.prompt_logprobs @@ -430,12 +396,20 @@ def add_request( sampling_params.eos_token_id = seq.eos_token_id # Process multi-modal data - if multi_modal_data is not None: - multi_modal_data = self.process_multi_modal_data(multi_modal_data) + if multi_modal_data is None: + mm_kwargs = {} + else: + vlm_config = self.vision_language_config + assert vlm_config is not None, ( + "Multi-modal inputs are only supported by " + "vision language models.") + + 
mm_kwargs = multi_modal_data.get_input_kwargs( + self.model_config, vlm_config) # Create the sequence group. seq_group = SequenceGroup(request_id, [seq], sampling_params, - arrival_time, lora_request, multi_modal_data) + arrival_time, lora_request, mm_kwargs) # Add the sequence group to the scheduler. self.scheduler.add_seq_group(seq_group) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 1911dee1fc9a..b817f10bf571 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -22,44 +22,12 @@ from vllm.model_executor.guided_decoding import ( get_guided_decoding_logits_processor) from vllm.outputs import RequestOutput -from vllm.sequence import MultiModalData +from vllm.sequence import ImagePixelData from vllm.utils import get_image_async, random_uuid logger = init_logger(__name__) -async def get_and_parse_image(image_url: str, - config: VisionLanguageConfig) -> MultiModalData: - - if len(config.image_input_shape) == 3: - raise ValueError( - "The model is configured to accept image features rather than " - "pixel values, and thus does not support image inputs") - - batch_size, num_channels, height, width = config.image_input_shape - - if num_channels == 1: - image_format = "L" - elif num_channels == 3: - image_format = "RGB" - elif num_channels == 4: - image_format = "RGBA" - else: - msg = f"Unsupported number of channels ({num_channels})" - raise NotImplementedError(msg) - - with await get_image_async(image_url) as image: - image = image.convert(image_format).resize((height, width)) - image_arr = np.array(image, copy=True) - - # Passed to the image processor which is loaded from HuggingFace - image_tensor = torch.as_tensor(image_arr) \ - .view(batch_size, height, width, num_channels) \ - .permute((0, 3, 1, 2)) # NCHW - - return MultiModalData(type=MultiModalData.Type.IMAGE, data=image_tensor) - - @final # So that it should be compatible with Dict[str, str] class ConversationMessage(TypedDict): role: str @@ -80,11 +48,22 @@ def __init__(self, self.response_role = response_role self._load_chat_template(chat_template) + async def _get_and_parse_image(self, image_url: str) -> ImagePixelData: + with await get_image_async(image_url) as image: + image_arr = np.array(image, copy=True) + + # Passed to the image processor which is loaded from HuggingFace + image_tensor = torch.as_tensor(image_arr) \ + .view(1, image.height, image.width, -1) \ + .permute((0, 3, 1, 2)) # NCHW + + return ImagePixelData(image_tensor) + def _parse_chat_message_image_input( self, role: ChatCompletionRole, content: Iterable[ChatCompletionContentPartParam], - ) -> Tuple[List[ConversationMessage], List[Awaitable[MultiModalData]]]: + ) -> Tuple[List[ConversationMessage], List[Awaitable[ImagePixelData]]]: """Parse image input defined by OpenAI Chat Completions API.""" config = getattr(self.engine.engine, "vision_language_config", None) if not isinstance(config, VisionLanguageConfig): @@ -95,7 +74,7 @@ def _parse_chat_message_image_input( assert tokenizer is not None texts: List[str] = [] - image_futures: List[Awaitable[MultiModalData]] = [] + image_futures: List[Awaitable[ImagePixelData]] = [] for i, part in enumerate(content): if part["type"] == "text": @@ -109,7 +88,7 @@ def _parse_chat_message_image_input( text = config.get_image_token_text( config, tokenizer, image_idx=len(image_futures)) - image_future = get_and_parse_image(image_url["url"], config) + image_future = self._get_and_parse_image(image_url["url"]) texts.append(text) 
image_futures.append(image_future) @@ -126,7 +105,7 @@ def _parse_chat_message_content( role: ChatCompletionRole, content: Optional[Union[str, Iterable[ChatCompletionContentPartParam]]], - ) -> Tuple[List[ConversationMessage], List[Awaitable[MultiModalData]]]: + ) -> Tuple[List[ConversationMessage], List[Awaitable[ImagePixelData]]]: if content is None: return [], [] if isinstance(content, str): @@ -153,7 +132,7 @@ async def create_chat_completion( try: conversation: List[ConversationMessage] = [] - multi_modal_futures: List[Awaitable[MultiModalData]] = [] + multi_modal_futures: List[Awaitable[ImagePixelData]] = [] for m in request.messages: messages, futures = self._parse_chat_message_content( diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py index 3b1d125ef8a6..0a260599c543 100644 --- a/vllm/model_executor/model_loader/loader.py +++ b/vllm/model_executor/model_loader/loader.py @@ -22,15 +22,10 @@ download_weights_from_hf, filter_files_not_needed_for_inference, get_quant_config, initialize_dummy_weights, np_cache_weights_iterator, pt_weights_iterator, safetensors_weights_iterator) -from vllm.model_executor.models.llava import LlavaForConditionalGeneration if TYPE_CHECKING: from vllm.model_executor.layers.linear import LinearMethodBase -_VISION_MODEL_CLASSES = [ - LlavaForConditionalGeneration, -] - logger = init_logger(__name__) @@ -74,7 +69,7 @@ def _get_model_initialization_kwargs( "but LoRA is enabled. Support for this model may " "be added in the future. If this is important to you, " "please open an issue on github.") - elif model_class in _VISION_MODEL_CLASSES: + elif getattr(model_class, "is_vlm", False): extra_kwargs["vision_language_config"] = vision_language_config return extra_kwargs diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 314a2792bf16..33dda575b2af 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -1,4 +1,5 @@ -from typing import Iterable, List, Optional, Tuple +from typing import (ClassVar, Iterable, List, Literal, Optional, Tuple, + TypedDict, Union) import torch from torch import nn @@ -39,7 +40,7 @@ def __init__(self, vision_hidden_size: int, text_hidden_size: int, text_hidden_size, bias=True) - def forward(self, image_features): + def forward(self, image_features: torch.Tensor): hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) @@ -49,20 +50,43 @@ def forward(self, image_features): def _merge_vision_embeddings(input_ids: torch.Tensor, inputs_embeds: torch.Tensor, vision_embeddings: torch.Tensor, - image_token_id: int): + image_token_id: int) -> torch.Tensor: """In place merges in vision_embeddings with inputs_embeds.""" mask = (input_ids == image_token_id) inputs_embeds[mask] = vision_embeddings.view(-1, vision_embeddings.shape[-1]) + return inputs_embeds + + +class LlavaImagePixelInputs(TypedDict): + type: Literal["pixel_values"] + data: torch.Tensor + """Shape: (batch_size, num_channels, height, width)""" + + +class LlavaImageFeatureInputs(TypedDict): + type: Literal["image_features"] + data: torch.Tensor + """Shape: (batch_size, image_feature_size, hidden_size)""" + + +LlavaImageInputs = Union[LlavaImagePixelInputs, LlavaImageFeatureInputs] + class LlavaForConditionalGeneration(nn.Module): + is_vlm: ClassVar[bool] = True + """Indicates that the model is a vision-language model and thus accepts + the `vision_language_config` parameter. 
+ """ + def __init__(self, - config: "LlavaConfig", + config: LlavaConfig, vision_language_config: VisionLanguageConfig, - linear_method: Optional["LinearMethodBase"] = None) -> None: + linear_method: Optional[LinearMethodBase] = None) -> None: super().__init__() + self.config = config self.vision_language_config = vision_language_config @@ -95,13 +119,108 @@ def __init__(self, config.vocab_size, logit_scale) self.sampler = Sampler() + def _validate_image_data(self, data: torch.Tensor) -> torch.Tensor: + if list(data.shape[1:]) != list( + self.vision_language_config.image_input_shape[1:]): + raise ValueError( + f"The expected image tensor shape is batch dimension plus " + f"{self.vision_language_config.image_input_shape[1:]}." + f" You supplied {data.shape}. " + f"If you are using vLLM's entrypoint, make sure your " + f"supplied image input is consistent with " + f"image_input_shape in engine args.") + + return data + + def _parse_and_validate_image_input( + self, **kwargs: object) -> Optional[LlavaImageInputs]: + pixel_values = kwargs.pop("pixel_values", None) + image_features = kwargs.pop("image_features", None) + + expected_input_type = self.vision_language_config.image_input_type + ImageInputType = VisionLanguageConfig.ImageInputType + + if expected_input_type == ImageInputType.PIXEL_VALUES: + if image_features is not None: + raise ValueError( + "Expected pixel values but got image features") + if pixel_values is None: + return None + + if not isinstance(pixel_values, torch.Tensor): + raise ValueError("Incorrect type of pixel values") + + return LlavaImagePixelInputs( + type="pixel_values", + data=self._validate_image_data(pixel_values), + ) + + if expected_input_type == ImageInputType.IMAGE_FEATURES: + if pixel_values is not None: + raise ValueError( + "Expected image features but got pixel values") + if image_features is None: + return None + + if not isinstance(image_features, torch.Tensor): + raise ValueError("Incorrect type of image features") + + return LlavaImageFeatureInputs( + type="image_features", + data=self._validate_image_data(image_features), + ) + + return None + + def _select_image_features(self, image_features: torch.Tensor, *, + strategy: str) -> torch.Tensor: + # Copied from https://github.com/huggingface/transformers/blob/39c3c0a72af6fbda5614dde02ff236069bb79827/src/transformers/models/llava/modeling_llava.py#L421 # noqa + if strategy == "default": + return image_features[:, 1:] + elif strategy == "full": + return image_features + + raise ValueError(f"Unexpected select feature strategy: {strategy}") + + def _image_pixels_to_features(self, vision_tower: CLIPVisionModel, + pixel_values: torch.Tensor) -> torch.Tensor: + # TODO(xwjiang): Maybe port minimal CLIPVisionModel over. 
+ image_outputs = vision_tower(pixel_values.to(vision_tower.device), + output_hidden_states=True) + + image_features = image_outputs.hidden_states[ + self.config.vision_feature_layer] + + return self._select_image_features( + image_features, + strategy=self.config.vision_feature_select_strategy, + ) + + def _process_image_pixels(self, + inputs: LlavaImagePixelInputs) -> torch.Tensor: + assert self.vision_tower is not None + + pixel_values = inputs["data"] + + return self._image_pixels_to_features(self.vision_tower, pixel_values) + + def _process_image_input(self, + image_input: LlavaImageInputs) -> torch.Tensor: + if image_input["type"] == "pixel_values": + assert self.vision_tower is not None + image_features = self._process_image_pixels(image_input) + else: + image_features = image_input["data"] + + return self.multi_modal_projector(image_features) + def forward( self, input_ids: torch.Tensor, positions: torch.Tensor, kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, - image_input: Optional[torch.Tensor] = None + **kwargs: object, ) -> SamplerOutput: # noqa: E501 """Run forward pass for Llava 1.5. @@ -135,46 +254,25 @@ def forward( Args: input_ids: Flattened (concatenated) input_ids corresponding to a batch. - image_input: A batch of image inputs. - For PIXEL_VALUES, expecting [1, 3, 336, 336]. - For IMAGE_FEATURES, expecting [1, 576, 1024]. + pixel_values: For PIXEL_VALUES, expects a batch with shape + [1, 3, 336, 336]. + image_features: For IMAGE_FEATURES, expects a batch with shape + [1, 576, 1024]. """ + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is not None: - if list(image_input.shape[1:]) != list( - self.vision_language_config.image_input_shape[1:]): - raise ValueError( - f"The expected image tensor shape is batch dimension " - f"plus " - f"{self.vision_language_config.image_input_shape[1:]}." - f" You supplied {image_input.shape}. " - f"If you are using vLLM's entrypoint, make sure your " - f"supplied image input is consistent with " - f"image_input_shape in engine args.") - if self.vision_tower is not None: - # TODO(xwjiang): Maybe port minimal CLIPVisionModel over. 
- image_outputs = self.vision_tower(image_input, - output_hidden_states=True) - image_features = image_outputs.hidden_states[ - self.config.vision_feature_layer] - # Copied from https://github.com/huggingface/transformers/blob/39c3c0a72af6fbda5614dde02ff236069bb79827/src/transformers/models/llava/modeling_llava.py#L421 # noqa - if self.config.vision_feature_select_strategy == "default": - image_features = image_features[:, 1:] - elif self.config.vision_feature_select_strategy == "full": - image_features = image_features - else: - raise ValueError( - f"Unexpected select feature strategy: " - f"{self.config.vision_feature_select_strategy}") - else: - image_features = image_input - vision_embeddings = self.multi_modal_projector(image_features) + vision_embeddings = self._process_image_input(image_input) inputs_embeds = self.language_model.get_input_embeddings(input_ids) - _merge_vision_embeddings( + + inputs_embeds = _merge_vision_embeddings( input_ids, inputs_embeds, vision_embeddings, self.vision_language_config.image_token_id) + input_ids = None else: inputs_embeds = None + hidden_states = self.language_model(input_ids, positions, kv_caches, diff --git a/vllm/sequence.py b/vllm/sequence.py index 92362a9a5d2a..3b806a5ee43b 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -1,18 +1,24 @@ """Sequence and its related classes.""" import copy import enum +from abc import ABC, abstractmethod from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Union from vllm.block import LogicalTokenBlock +from vllm.config import ModelConfig, VisionLanguageConfig +from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.sampling_params import SamplingParams +from vllm.transformers_utils.image_processor import cached_get_image_processor if TYPE_CHECKING: import torch from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics +logger = init_logger(__name__) + @dataclass class Logprob: @@ -373,23 +379,61 @@ class SequenceGroupState: generator: Optional = None # type: ignore -class MultiModalData: - """Multi modal request. - - Args: - type: The data type. - data: The actual data. - The required shape and semantic meaning of it depends on the vision - language config of the hosted model. - See `VisionLanguageConfig` in `config.py`. - """ +class MultiModalData(ABC): + + @abstractmethod + def get_input_kwargs( + self, model_config: ModelConfig, + vlm_config: VisionLanguageConfig) -> Dict[str, "torch.Tensor"]: + """Returns a dictionary which are passed as keyword arguments to + :meth:`torch.nn.Module.forward`. 
+ """ + raise NotImplementedError + + +class ImagePixelData(MultiModalData): + + def __init__(self, pixel_values: "torch.Tensor") -> None: + self.pixel_values = pixel_values + + def _get_image_processor(self, model_config: ModelConfig, + vlm_config: VisionLanguageConfig): + if vlm_config is None or vlm_config.image_processor is None: + return None + + return cached_get_image_processor( + vlm_config.image_processor, + trust_remote_code=model_config.trust_remote_code, + revision=vlm_config.image_processor_revision, + ) + + def get_input_kwargs( + self, model_config: ModelConfig, + vlm_config: VisionLanguageConfig) -> Dict[str, "torch.Tensor"]: + image_processor = self._get_image_processor(model_config, vlm_config) + if image_processor is None: + return {"pixel_values": self.pixel_values} + + try: + out_dict = image_processor.preprocess(self.pixel_values) \ + .convert_to_tensors("pt") + except Exception: + logger.error("Failed to process image with shape %s", + self.pixel_values.shape) + raise + + return out_dict.data + + +class ImageFeatureData(MultiModalData): - class Type(enum.Enum): - IMAGE = enum.auto() + def __init__(self, image_features: "torch.Tensor") -> None: + self.image_features = image_features - def __init__(self, type: Type, data: "torch.Tensor"): - self.type = type - self.data = data + def get_input_kwargs( + self, model_config: ModelConfig, + vlm_config: VisionLanguageConfig) -> Dict[str, "torch.Tensor"]: + return {"image_features": self.image_features} class SequenceGroup: @@ -401,7 +445,8 @@ class SequenceGroup: sampling_params: The sampling parameters used to generate the outputs. arrival_time: The arrival time of the request. lora_request: LoRA request. - multi_modal_data: Multi modal data associated with the request. + multi_modal_kwargs: Extra kwargs to the model that are associated with + multi modal data. 
""" def __init__( @@ -411,7 +456,7 @@ def __init__( sampling_params: SamplingParams, arrival_time: float, lora_request: Optional[LoRARequest] = None, - multi_modal_data: Optional[MultiModalData] = None, + multi_modal_kwargs: Optional[Dict[str, "torch.Tensor"]] = None, ) -> None: self.request_id = request_id self.seqs_dict = {seq.seq_id: seq for seq in seqs} @@ -424,7 +469,7 @@ def __init__( self.lora_request = lora_request self.prompt_logprobs: Optional[PromptLogprobs] = None self.state = SequenceGroupState() - self.multi_modal_data = multi_modal_data + self.multi_modal_kwargs = multi_modal_kwargs or {} @property def prompt(self) -> str: @@ -575,7 +620,7 @@ def __init__( lora_request: Optional[LoRARequest] = None, computed_block_nums: Optional[List[int]] = None, state: Optional[SequenceGroupState] = None, - multi_modal_data: Optional[MultiModalData] = None, + multi_modal_kwargs: Optional[Dict[str, "torch.Tensor"]] = None, ) -> None: self.request_id = request_id self.is_prompt = is_prompt @@ -584,7 +629,7 @@ def __init__( self.block_tables = block_tables self.lora_request = lora_request self.computed_block_nums = computed_block_nums - self.multi_modal_data = multi_modal_data + self.multi_modal_kwargs = multi_modal_kwargs or {} self.state = SequenceGroupState() if state is None else state self._token_chunk_size = token_chunk_size diff --git a/vllm/transformers_utils/image_processor.py b/vllm/transformers_utils/image_processor.py index 2bb5215d4846..3239b1d0cfa2 100644 --- a/vllm/transformers_utils/image_processor.py +++ b/vllm/transformers_utils/image_processor.py @@ -1,3 +1,4 @@ +from functools import lru_cache from typing import Optional from transformers import AutoImageProcessor @@ -39,3 +40,6 @@ def get_image_processor( raise e return processor + + +cached_get_image_processor = lru_cache(get_image_processor) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 31e08789dfd1..627867aea24e 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1,5 +1,6 @@ import contextlib import time +from collections import defaultdict from enum import IntEnum from typing import Dict, List, NamedTuple, Optional, Set, Tuple @@ -21,8 +22,8 @@ from vllm.model_executor import SamplingMetadata from vllm.model_executor.model_loader import get_model from vllm.sampling_params import SamplingParams, SamplingType -from vllm.sequence import (MultiModalData, SamplerOutput, SequenceData, - SequenceGroupMetadata) +from vllm.sequence import (ImageFeatureData, ImagePixelData, SamplerOutput, + SequenceData, SequenceGroupMetadata) from vllm.utils import (CudaMemoryProfiler, async_tensor_h2d, is_hip, is_pin_memory_available, make_tensor_with_pad, maybe_expand_dim) @@ -48,7 +49,7 @@ class PreparePromptMetadata(NamedTuple): lora_index_mapping: List[int] lora_prompt_mapping: List[int] lora_requests: Set[LoRARequest] - multi_modal_input: Optional[torch.Tensor] + multi_modal_kwargs: Dict[str, torch.Tensor] slot_mapping: List[int] @classmethod @@ -62,7 +63,7 @@ def empty(cls): lora_index_mapping=[], lora_prompt_mapping=[], lora_requests=set(), - multi_modal_input=None, + multi_modal_kwargs={}, slot_mapping=[], ) @@ -235,7 +236,8 @@ def _prepare_prompt( context_lens: List[int] = [] subquery_lens: List[int] = [] prefix_block_tables: List[List[int]] = [] - multi_modal_input_list: List[torch.Tensor] = [] + multi_modal_kwargs_list: Dict[str, + List[torch.Tensor]] = defaultdict(list) if len(seq_group_metadata_list) == 0: return PreparePromptMetadata.empty() @@ -306,9 +308,8 @@ def 
_prepare_prompt( (prompt_len - computed_len if seq_group_metadata.sampling_params.prompt_logprobs else 1)) - if seq_group_metadata.multi_modal_data: - multi_modal_input_list.append( - seq_group_metadata.multi_modal_data.data) + for k, v in seq_group_metadata.multi_modal_kwargs.items(): + multi_modal_kwargs_list[k].append(v) if seq_group_metadata.block_tables is None: # During memory profiling, the block tables are not initialized @@ -348,15 +349,6 @@ def _prepare_prompt( dtype=torch.int, device=self.device) - if multi_modal_input_list: - assert self.vision_language_config, ( - "Multi-modal inputs are only supported by " - "vision language models.") - multi_modal_input = torch.cat(multi_modal_input_list, - dim=0).to(self.device) - else: - multi_modal_input = None - # Prepare prefix block tables max_prompt_block_table_len = max(len(t) for t in prefix_block_tables) block_tables = make_tensor_with_pad( @@ -407,6 +399,11 @@ def _prepare_prompt( use_cuda_graph=False, ) + multi_modal_kwargs = { + k: torch.cat(v, dim=0).to(self.device) + for k, v in multi_modal_kwargs_list.items() + } + return PreparePromptMetadata( input_tokens=input_tokens, input_positions=input_positions, @@ -416,7 +413,7 @@ def _prepare_prompt( lora_index_mapping=lora_index_mapping, lora_prompt_mapping=lora_prompt_mapping, lora_requests=lora_requests, - multi_modal_input=multi_modal_input, + multi_modal_kwargs=multi_modal_kwargs, slot_mapping=slot_mapping, ) @@ -672,7 +669,7 @@ def prepare_input_tensors( lora_index_mapping, lora_prompt_mapping, lora_requests, - multi_modal_input, + multi_modal_kwargs, slot_mapping, ) = self._prepare_prompt(prefill_reqs) ( @@ -740,7 +737,7 @@ def prepare_input_tensors( sampling_metadata.selected_token_indices, "lora_requests": lora_requests, "lora_mapping": lora_mapping, - "multi_modal_input": multi_modal_input, + "multi_modal_kwargs": multi_modal_kwargs, "num_prefill_tokens": num_prefill_tokens, "num_decode_tokens": num_decode_tokens, "slot_mapping": slot_mapping, @@ -771,7 +768,7 @@ def prepare_input_tensors( "selected_token_indices") lora_mapping = metadata_dict.pop("lora_mapping") lora_requests = metadata_dict.pop("lora_requests") - multi_modal_input = metadata_dict.pop("multi_modal_input") + multi_modal_kwargs = metadata_dict.pop("multi_modal_kwargs") num_prefill_tokens = metadata_dict.pop("num_prefill_tokens") num_decode_tokens = metadata_dict.pop("num_decode_tokens") batch_type = metadata_dict.pop("batch_type") @@ -814,7 +811,7 @@ def prepare_input_tensors( return (input_tokens, input_positions, attn_metadata, sampling_metadata, lora_requests, lora_mapping, - multi_modal_input) + multi_modal_kwargs) @torch.inference_mode() def execute_model( @@ -823,7 +820,7 @@ def execute_model( kv_caches: List[torch.Tensor], ) -> Optional[SamplerOutput]: (input_tokens, input_positions, attn_metadata, sampling_metadata, - lora_requests, lora_mapping, multi_modal_input + lora_requests, lora_mapping, multi_modal_kwargs ) = self.prepare_input_tensors(seq_group_metadata_list) if self.lora_config: @@ -837,15 +834,14 @@ def execute_model( model_executable = self.graph_runners[graph_batch_size] else: model_executable = self.model - execute_model_kwargs = { - "input_ids": input_tokens, - "positions": input_positions, - "kv_caches": kv_caches, - "attn_metadata": attn_metadata, - } - if self.vision_language_config: - execute_model_kwargs.update({"image_input": multi_modal_input}) - hidden_states = model_executable(**execute_model_kwargs) + + hidden_states = model_executable( + input_ids=input_tokens, + 
positions=input_positions, + kv_caches=kv_caches, + attn_metadata=attn_metadata, + **multi_modal_kwargs, + ) # Compute the logits. logits = self.model.compute_logits(hidden_states, sampling_metadata) @@ -907,8 +903,8 @@ def profile_run(self) -> None: for group_id in range(max_num_seqs): seq_len = (max_num_batched_tokens // max_num_seqs + (group_id < max_num_batched_tokens % max_num_seqs)) - seq_data, fake_multi_modal_input = _prepare_fake_inputs( - seq_len, self.vision_language_config) + seq_data, multi_modal_kwargs = _prepare_fake_inputs( + seq_len, self.model_config, self.vision_language_config) seq = SequenceGroupMetadata( request_id=str(group_id), is_prompt=True, @@ -917,7 +913,7 @@ def profile_run(self) -> None: block_tables=None, lora_request=dummy_lora_requests_per_seq[group_id] if dummy_lora_requests_per_seq else None, - multi_modal_data=fake_multi_modal_input, + multi_modal_kwargs=multi_modal_kwargs, ) seqs.append(seq) @@ -1191,18 +1187,37 @@ def _get_graph_batch_size(batch_size: int) -> int: def _prepare_fake_inputs( - seq_len: int, vision_language_config: Optional[VisionLanguageConfig]): + seq_len: int, model_config: ModelConfig, + vision_language_config: Optional[VisionLanguageConfig]): """Prepare fake inputs for profile run.""" if vision_language_config: prompt_tokens = [ vision_language_config.image_token_id ] * vision_language_config.image_feature_size + [0] * ( seq_len - vision_language_config.image_feature_size) - fake_image_input = MultiModalData( - type=MultiModalData.Type.IMAGE, - data=torch.zeros(vision_language_config.image_input_shape, - dtype=torch.float16)) + + if vision_language_config.image_processor is None: + values_dtype = torch.float16 + else: + values_dtype = torch.uint8 + + values = torch.zeros(vision_language_config.image_input_shape, + dtype=values_dtype) + + config_input_type = vision_language_config.image_input_type + ImageInputType = VisionLanguageConfig.ImageInputType + + if config_input_type == ImageInputType.PIXEL_VALUES: + fake_mm_data = ImagePixelData(values) + elif config_input_type == ImageInputType.IMAGE_FEATURES: + fake_mm_data = ImageFeatureData(values) + else: + raise NotImplementedError + + fake_mm_kwargs = fake_mm_data.get_input_kwargs(model_config, + vision_language_config) else: prompt_tokens = [0] * seq_len - fake_image_input = None - return SequenceData(prompt_tokens), fake_image_input + fake_mm_kwargs = {} + + return SequenceData(prompt_tokens), fake_mm_kwargs From 483b190a23f4d85f1761671757c2ceb0ff66b16f Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 06:17:15 +0000 Subject: [PATCH 36/45] Fix image processing not working directly, due to tensor being passed - Now, `ImagePixelData` only accepts `PIL.Image` input - Also move `torch` import out of `TYPE_CHECKING` as it is loaded anyways when importing `SamplingParams` --- docs/source/models/vlm.rst | 2 +- examples/llava_example.py | 7 +++-- vllm/entrypoints/openai/serving_chat.py | 11 +------ vllm/sequence.py | 42 +++++++++++++++---------- vllm/worker/model_runner.py | 5 ++- 5 files changed, 37 insertions(+), 30 deletions(-) diff --git a/docs/source/models/vlm.rst b/docs/source/models/vlm.rst index a33426a0d17a..854885ea2681 100644 --- a/docs/source/models/vlm.rst +++ b/docs/source/models/vlm.rst @@ -65,7 +65,7 @@ For now, we only support a single image per text prompt when calling ``llm.gener prompt = "" * 576 + ( "\nUSER: What is the content of this image?\nASSISTANT:") - # Load the image and reshape to (1, 3, 336, 336) + # Load the image using PIL.Image image = ... 
outputs = llm.generate(prompt, multi_modal_datas=ImagePixelData(image)) diff --git a/examples/llava_example.py b/examples/llava_example.py index 1d66b7a673a8..1c25d3fe9491 100644 --- a/examples/llava_example.py +++ b/examples/llava_example.py @@ -3,6 +3,7 @@ import subprocess import torch +from PIL import Image from vllm import LLM from vllm.sequence import ImageFeatureData, ImagePixelData @@ -24,7 +25,9 @@ def run_llava_pixel_values(): "\nUSER: What is the content of this image?\nASSISTANT:") # This should be provided by another online or offline component. - image = torch.load("images/stop_sign_pixel_values.pt") + image_tensor: torch.Tensor = torch.load("images/stop_sign_pixel_values.pt") + image_arr = image_tensor.view(3, 336, 336).permute((1, 2, 0)).numpy() + image = Image.fromarray(image_arr, mode="RGB") outputs = llm.generate(prompt, multi_modal_datas=ImagePixelData(image)) for o in outputs: @@ -46,7 +49,7 @@ def run_llava_image_features(): "\nUSER: What is the content of this image?\nASSISTANT:") # This should be provided by another online or offline component. - image = torch.load("images/stop_sign_image_features.pt") + image: torch.Tensor = torch.load("images/stop_sign_image_features.pt") outputs = llm.generate(prompt, multi_modal_datas=ImageFeatureData(image)) for o in outputs: diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index b817f10bf571..82335283bdc6 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -3,8 +3,6 @@ from typing import (AsyncGenerator, AsyncIterator, Awaitable, Iterable, List, Optional, Tuple, TypedDict, Union, final) -import numpy as np -import torch from fastapi import Request from openai.types.chat import (ChatCompletionContentPartParam, ChatCompletionRole) @@ -50,14 +48,7 @@ def __init__(self, async def _get_and_parse_image(self, image_url: str) -> ImagePixelData: with await get_image_async(image_url) as image: - image_arr = np.array(image, copy=True) - - # Passed to the image processor which is loaded from HuggingFace - image_tensor = torch.as_tensor(image_arr) \ - .view(1, image.height, image.width, -1) \ - .permute((0, 3, 1, 2)) # NCHW - - return ImagePixelData(image_tensor) + return ImagePixelData(image) def _parse_chat_message_image_input( self, diff --git a/vllm/sequence.py b/vllm/sequence.py index 3b806a5ee43b..841be604e448 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -5,6 +5,10 @@ from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, List, Optional, Union +import numpy as np +import torch +from PIL import Image + from vllm.block import LogicalTokenBlock from vllm.config import ModelConfig, VisionLanguageConfig from vllm.logger import init_logger @@ -13,8 +17,6 @@ from vllm.transformers_utils.image_processor import cached_get_image_processor if TYPE_CHECKING: - import torch - from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics logger = init_logger(__name__) @@ -384,7 +386,7 @@ class MultiModalData(ABC): @abstractmethod def get_input_kwargs( self, model_config: ModelConfig, - vlm_config: VisionLanguageConfig) -> Dict[str, "torch.Tensor"]: + vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: """Returns a dictionary which are passed as keyword arguments to :meth:`torch.nn.Module.forward`. 
""" @@ -393,8 +395,11 @@ def get_input_kwargs( class ImagePixelData(MultiModalData): - def __init__(self, pixel_values: "torch.Tensor") -> None: - self.pixel_values = pixel_values + def __init__(self, image: Image.Image) -> None: + # So that this class can be created inside the Image context manager + image.load() + + self.image = image def _get_image_processor(self, model_config: ModelConfig, vlm_config: VisionLanguageConfig): @@ -409,17 +414,22 @@ def _get_image_processor(self, model_config: ModelConfig, def get_input_kwargs( self, model_config: ModelConfig, - vlm_config: VisionLanguageConfig) -> Dict[str, "torch.Tensor"]: + vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: image_processor = self._get_image_processor(model_config, vlm_config) if image_processor is None: - return {"pixel_values": self.pixel_values} + image = self.image + image_arr = np.array(image, copy=True) + pixel_values = torch.as_tensor(image_arr) \ + .view(1, image.height, image.width, -1) \ + .permute((0, 3, 1, 2)) # NCHW + + return {"pixel_values": pixel_values} try: - out_dict = image_processor.preprocess(self.pixel_values) \ + out_dict = image_processor.preprocess(self.image) \ .convert_to_tensors("pt") except Exception: - logger.error("Failed to process image with shape %s", - self.pixel_values.shape) + logger.error("Failed to process image (%s)", self.image) raise return out_dict.data @@ -427,12 +437,12 @@ def get_input_kwargs( class ImageFeatureData(MultiModalData): - def __init__(self, image_features: "torch.Tensor") -> None: + def __init__(self, image_features: torch.Tensor) -> None: self.image_features = image_features def get_input_kwargs( self, model_config: ModelConfig, - vlm_config: VisionLanguageConfig) -> Dict[str, "torch.Tensor"]: + vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: return {"image_features": self.image_features} @@ -456,7 +466,7 @@ def __init__( sampling_params: SamplingParams, arrival_time: float, lora_request: Optional[LoRARequest] = None, - multi_modal_kwargs: Optional[Dict[str, "torch.Tensor"]] = None, + multi_modal_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> None: self.request_id = request_id self.seqs_dict = {seq.seq_id: seq for seq in seqs} @@ -620,7 +630,7 @@ def __init__( lora_request: Optional[LoRARequest] = None, computed_block_nums: Optional[List[int]] = None, state: Optional[SequenceGroupState] = None, - multi_modal_kwargs: Optional[Dict[str, "torch.Tensor"]] = None, + multi_modal_kwargs: Optional[Dict[str, torch.Tensor]] = None, ) -> None: self.request_id = request_id self.is_prompt = is_prompt @@ -718,10 +728,10 @@ class SamplerOutput: outputs: List[SequenceGroupOutput] # On-device tensor containing probabilities of each token. - sampled_token_probs: Optional["torch.Tensor"] = None + sampled_token_probs: Optional[torch.Tensor] = None # On-device tensor containing the sampled token ids. - sampled_token_ids: Optional["torch.Tensor"] = None + sampled_token_ids: Optional[torch.Tensor] = None # Spec decode metrics populated by workers. 
spec_decode_worker_metrics: Optional["SpecDecodeWorkerMetrics"] = None diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 627867aea24e..f58f70c65b88 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -7,6 +7,7 @@ import numpy as np import torch import torch.nn as nn +from PIL import Image from vllm.attention import (AttentionMetadata, AttentionMetadataPerStage, get_attn_backend) @@ -1208,7 +1209,9 @@ def _prepare_fake_inputs( ImageInputType = VisionLanguageConfig.ImageInputType if config_input_type == ImageInputType.PIXEL_VALUES: - fake_mm_data = ImagePixelData(values) + values_arr = values.view(3, 336, 336).permute((1, 2, 0)).numpy() + image = Image.fromarray(values_arr, mode="RGB") + fake_mm_data = ImagePixelData(image) elif config_input_type == ImageInputType.IMAGE_FEATURES: fake_mm_data = ImageFeatureData(values) else: From 0b6af35651c1fbc139d7705d1779ca7ec634069e Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 07:31:16 +0000 Subject: [PATCH 37/45] Revert to using 7b model in testing --- tests/models/test_llava.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index db0159a02b42..8785ee2afc3d 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -26,8 +26,9 @@ def iter_llava_configs(model_name: str): model_and_vl_config = [ - # *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), - *iter_llava_configs("llava-hf/llava-1.5-13b-hf"), + *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), + # Not enough memory + # *iter_llava_configs("llava-hf/llava-1.5-13b-hf"), ] From e4c35029124144f178aa1e3e4d076a46a1493344 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 09:42:30 +0000 Subject: [PATCH 38/45] Get LLaVA-Next to work with fixed-size images - Note the patch in `ImagePixelData`. To fully leverage the potential of LLaVA-Next, we should allow image of any size, but the feature size would then be variable. 
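For reference, a minimal offline-inference sketch of the fixed-size path enabled by this patch. The model name, token id, and feature size mirror `iter_llava_next_configs` in `tests/models/test_llava.py`; the "<image>" placeholder string and the engine keyword arguments are carried over from `examples/llava_example.py` and are assumptions rather than part of this patch (the chat template of the v1.6-34b checkpoint may also differ from the LLaVA-1.5 style prompt used here):

    from PIL import Image

    from vllm import LLM
    from vllm.sequence import ImagePixelData

    # Fixed-size configuration: ImagePixelData resizes every input image to the
    # configured 336x336 shape, so the prompt reserves exactly 1176 positions.
    llm = LLM(
        model="llava-hf/llava-v1.6-34b-hf",
        image_input_type="pixel_values",
        image_token_id=64000,
        image_input_shape="1,3,336,336",
        image_feature_size=1176,
    )

    prompt = "<image>" * 1176 + (
        "\nUSER: What is the content of this image?\nASSISTANT:")
    image = Image.open("images/stop_sign.jpg")

    outputs = llm.generate(prompt, multi_modal_datas=ImagePixelData(image))
    for o in outputs:
        print(o.outputs[0].text)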
--- tests/conftest.py | 5 +- tests/models/test_llava.py | 16 ++ tests/tokenization/test_image_processor.py | 1 + vllm/model_executor/models/__init__.py | 2 + vllm/model_executor/models/llava_next.py | 214 +++++++++++++++++++++ vllm/sequence.py | 9 +- 6 files changed, 242 insertions(+), 5 deletions(-) create mode 100644 vllm/model_executor/models/llava_next.py diff --git a/tests/conftest.py b/tests/conftest.py index b40225f0af74..acd1caa30b75 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -7,7 +7,8 @@ import torch from PIL import Image from transformers import (AutoModelForCausalLM, AutoProcessor, - LlavaForConditionalGeneration) + LlavaForConditionalGeneration, + LlavaNextForConditionalGeneration) from vllm import LLM, SamplingParams from vllm.config import TokenizerPoolConfig, VisionLanguageConfig @@ -130,7 +131,7 @@ def example_long_prompts() -> List[str]: _VISION_LANGUAGE_MODELS = { "llava-hf/llava-1.5-7b-hf": LlavaForConditionalGeneration, "llava-hf/llava-1.5-13b-hf": LlavaForConditionalGeneration, - "llava-hf/bakLlava-v1-hf": LlavaForConditionalGeneration, + "llava-hf/llava-v1.6-34b-hf": LlavaNextForConditionalGeneration, } diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index 8785ee2afc3d..126cd4478666 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -25,10 +25,26 @@ def iter_llava_configs(model_name: str): image_processor_revision=None)) +def iter_llava_next_configs(model_name: str): + for input_type, input_shape in [ + # `vision_config` on HuggingFace only supports `image_size=336` + (VisionLanguageConfig.ImageInputType.PIXEL_VALUES, (1, 3, 336, 336)), + (VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, (1, 576, 1024)), + ]: + yield (model_name, + VisionLanguageConfig(image_input_type=input_type, + image_feature_size=576, + image_token_id=64000, + image_input_shape=input_shape, + image_processor=None, + image_processor_revision=None)) + + model_and_vl_config = [ *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), # Not enough memory # *iter_llava_configs("llava-hf/llava-1.5-13b-hf"), + # *iter_llava_next_configs("llava-hf-llava-v1.6-34b-hf"), ] diff --git a/tests/tokenization/test_image_processor.py b/tests/tokenization/test_image_processor.py index d7989bd623e2..5ba232336741 100644 --- a/tests/tokenization/test_image_processor.py +++ b/tests/tokenization/test_image_processor.py @@ -5,6 +5,7 @@ IMAGE_PROCESSOR_NAMES = [ "llava-hf/llava-1.5-7b-hf", + "llava-hf/llava-v1.6-34b-hf", ] diff --git a/vllm/model_executor/models/__init__.py b/vllm/model_executor/models/__init__.py index ff45838f34e7..ae2353eaa269 100755 --- a/vllm/model_executor/models/__init__.py +++ b/vllm/model_executor/models/__init__.py @@ -34,6 +34,8 @@ "LlavaForConditionalGeneration": ("llava", "LlavaForConditionalGeneration"), "LlavaForCausalLM": ("llava", "LlavaForConditionalGeneration"), + "LlavaNextForConditionalGeneration": + ("llava_next", "LlavaNextForConditionalGeneration"), # For decapoda-research/llama-* "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"), "MistralForCausalLM": ("llama", "LlamaForCausalLM"), diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py new file mode 100644 index 000000000000..b97c9f3643a2 --- /dev/null +++ b/vllm/model_executor/models/llava_next.py @@ -0,0 +1,214 @@ +from typing import Optional, TypedDict, Union + +import torch +from torch import nn +from transformers import LlavaNextConfig +from transformers.models.llava_next.modeling_llava_next import ( + 
get_anyres_image_grid_shape, unpad_image) + +from vllm.config import VisionLanguageConfig +from vllm.model_executor.layers.linear import LinearMethodBase + +from .llava import (LlavaImagePixelInputs, LlavaImageFeatureInputs, + LlavaForConditionalGeneration) + + +class ImageSizesMixin(TypedDict, total=False): + image_sizes: torch.Tensor + """Shape: (batch_size, 2)""" + + +class LlavaNextImagePixelInputs(ImageSizesMixin, LlavaImagePixelInputs): + data: torch.Tensor + """Shape: (batch_size, 1 + num_patches, num_channels, height, width)""" + + +class LlavaNextImageFeatureInputs(ImageSizesMixin, LlavaImageFeatureInputs): + data: torch.Tensor + """Shape: (batch_size, 1 + num_patches, image_feature_size, hidden_size)""" + + +LlavaNextImageInputs = Union[LlavaNextImagePixelInputs, + LlavaNextImageFeatureInputs] + + +class LlavaNextForConditionalGeneration(LlavaForConditionalGeneration): + """ + Args to `forward()`: + input_ids: Flattened (concatenated) input_ids corresponding to a + batch. + pixel_values: For PIXEL_VALUES, expects a batch with shape + [1, num_patches, 3, 336, 336]. + image_features: For IMAGE_FEATURES, expects a batch with shape + [1, num_patches, 576, 1024]. + """ + + def __init__(self, + config: LlavaNextConfig, + vision_language_config: VisionLanguageConfig, + linear_method: Optional[LinearMethodBase] = None) -> None: + super().__init__(config, vision_language_config, linear_method) + + # Update the type annotation from that of its superclass + self.config = config + + self.image_newline = nn.Parameter( + torch.empty(config.text_config.hidden_size)) + + def _validate_image_data(self, data: torch.Tensor) -> torch.Tensor: + if list(data.shape[2:]) != list( + self.vision_language_config.image_input_shape[1:]): + raise ValueError( + f"The expected image tensor shape is batch dimension " + f"plus num_patches plus " + f"{self.vision_language_config.image_input_shape[1:]}." + f" You supplied {data.shape}. 
" + f"If you are using vLLM's entrypoint, make sure your " + f"supplied image input is consistent with " + f"image_input_shape in engine args.") + + return data + + def _parse_and_validate_image_input( + self, **kwargs: object) -> Optional[LlavaNextImageInputs]: + pixel_values = kwargs.pop("pixel_values", None) + image_sizes = kwargs.pop("image_sizes", None) + image_features = kwargs.pop("image_features", None) + + expected_input_type = self.vision_language_config.image_input_type + ImageInputType = VisionLanguageConfig.ImageInputType + + if expected_input_type == ImageInputType.PIXEL_VALUES: + if image_features is not None: + raise ValueError( + "Expected pixel values but got image features") + if pixel_values is None: + return None + + if not isinstance(pixel_values, torch.Tensor): + raise ValueError("Incorrect type of pixel values") + + if not isinstance(image_sizes, torch.Tensor): + raise ValueError("Incorrect type of image sizes") + + return LlavaNextImagePixelInputs( + type="pixel_values", + data=self._validate_image_data(pixel_values), + image_sizes=image_sizes, + ) + + if expected_input_type == ImageInputType.IMAGE_FEATURES: + if pixel_values is not None: + raise ValueError( + "Expected image features but got pixel values") + if image_features is None: + return None + + if not isinstance(image_features, torch.Tensor): + raise ValueError("Incorrect type of image features") + + return LlavaNextImageFeatureInputs( + type="image_features", + data=self._validate_image_data(image_features), + ) + + return None + + def _merge_image_patch_embeddings(self, image_size: torch.Tensor, + patch_embeddings: torch.Tensor, *, + strategy: str) -> torch.Tensor: + if strategy == "flat": + return patch_embeddings.flatten(0, 1) + + if strategy.startswith("spatial"): + orig_width, orig_height = image_size + height = width = self.config.vision_config.image_size \ + // self.config.vision_config.patch_size + + base_patch_embeds = patch_embeddings[0] + if height * width != base_patch_embeds.shape[0]: + raise ValueError( + "The number of patches is not consistent with the " + "image size.") + + if patch_embeddings.shape[0] > 1: + other_patch_embeds = patch_embeddings[1:] + + # image_aspect_ratio == "anyres" + num_patch_width, num_patch_height = get_anyres_image_grid_shape( + (orig_width, orig_height), + self.config.image_grid_pinpoints, + self.config.vision_config.image_size, + ) + other_patch_embeds = other_patch_embeds \ + .view(num_patch_width, num_patch_height, height, width, -1) + + if "unpad" in strategy: + other_patch_embeds = other_patch_embeds \ + .permute(4, 0, 2, 1, 3).contiguous() \ + .flatten(1, 2).flatten(2, 3) + other_patch_embeds = unpad_image(other_patch_embeds, + image_size) + other_patch_embeds = torch.cat(( + other_patch_embeds, + self.image_newline[:, None, None] \ + .expand(*other_patch_embeds.shape[:-1], 1) \ + .to(other_patch_embeds.device), + ), dim=-1) + other_patch_embeds = other_patch_embeds \ + .flatten(1, 2).transpose(0, 1) + else: + other_patch_embeds = other_patch_embeds \ + .permute(0, 2, 1, 3, 4).contiguous() \ + .flatten(0, 3) + + merged_patch_embeddings = torch.cat( + (base_patch_embeds, other_patch_embeds), dim=0) + else: + if "unpad" in strategy: + merged_patch_embeddings = torch.cat( + (base_patch_embeds, + self.image_newline[None] \ + .to(base_patch_embeds.device) + ), dim=0) + else: + merged_patch_embeddings = base_patch_embeds + + return merged_patch_embeddings + + raise ValueError(f"Unexpected patch merge strategy: {strategy}") + + def _process_image_pixels( + self, 
inputs: LlavaNextImagePixelInputs) -> torch.Tensor: + assert self.vision_tower is not None + + pixel_values = inputs["data"] + + b, num_patches, c, h, w = pixel_values.shape + stacked_pixel_values = pixel_values.view(b * num_patches, c, h, w) + + stacked_image_features = self._image_pixels_to_features( + self.vision_tower, stacked_pixel_values) + + return stacked_image_features.view(b, num_patches, + *stacked_image_features.shape[-2:]) + + def _process_image_input( + self, image_input: LlavaNextImageInputs) -> torch.Tensor: + patch_embeddings = super()._process_image_input(image_input) + + image_sizes = image_input.get("image_sizes") + if image_sizes is None: + batch_size = image_input["data"].shape[0] + default_width, default_height = self.config.vision_config.image_size + image_sizes = torch.as_tensor([[default_width, default_height] + for _ in range(batch_size)]) + + merged_patch_embeddings = [ + self._merge_image_patch_embeddings(image_sizes[i], + patch_features, + strategy="spatial_unpad") + for i, patch_features in enumerate(patch_embeddings) + ] + + return torch.stack(merged_patch_embeddings, dim=0) diff --git a/vllm/sequence.py b/vllm/sequence.py index 841be604e448..6aa589187b19 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -415,9 +415,12 @@ def _get_image_processor(self, model_config: ModelConfig, def get_input_kwargs( self, model_config: ModelConfig, vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: + # Temporary patch to make LLaVA-NeXT usable + # When image size is (336, 336), the feature size is fixed to 1176 + image = self.image.resize((336, 336)) + image_processor = self._get_image_processor(model_config, vlm_config) if image_processor is None: - image = self.image image_arr = np.array(image, copy=True) pixel_values = torch.as_tensor(image_arr) \ .view(1, image.height, image.width, -1) \ @@ -426,10 +429,10 @@ def get_input_kwargs( return {"pixel_values": pixel_values} try: - out_dict = image_processor.preprocess(self.image) \ + out_dict = image_processor.preprocess(image) \ .convert_to_tensors("pt") except Exception: - logger.error("Failed to process image (%s)", self.image) + logger.error("Failed to process image (%s)", image) raise return out_dict.data From 21aaf3de54cb50427c054d093c1247ced7f7a5a1 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 10:01:16 +0000 Subject: [PATCH 39/45] Apply formatter and fix typo --- tests/models/test_llava.py | 2 +- vllm/model_executor/models/llava_next.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index 126cd4478666..18d608af976b 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -44,7 +44,7 @@ def iter_llava_next_configs(model_name: str): *iter_llava_configs("llava-hf/llava-1.5-7b-hf"), # Not enough memory # *iter_llava_configs("llava-hf/llava-1.5-13b-hf"), - # *iter_llava_next_configs("llava-hf-llava-v1.6-34b-hf"), + # *iter_llava_next_configs("llava-hf/llava-v1.6-34b-hf"), ] diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index b97c9f3643a2..04cc59e869b8 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -9,8 +9,8 @@ from vllm.config import VisionLanguageConfig from vllm.model_executor.layers.linear import LinearMethodBase -from .llava import (LlavaImagePixelInputs, LlavaImageFeatureInputs, - LlavaForConditionalGeneration) +from .llava import (LlavaForConditionalGeneration, 
LlavaImageFeatureInputs, + LlavaImagePixelInputs) class ImageSizesMixin(TypedDict, total=False): From ac95b7974abb8be569dcb3a46b073abcdbc7840c Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 12:54:51 +0000 Subject: [PATCH 40/45] Fix input shape not being based on config value --- vllm/worker/model_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index f58f70c65b88..db8080b42165 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -1209,7 +1209,7 @@ def _prepare_fake_inputs( ImageInputType = VisionLanguageConfig.ImageInputType if config_input_type == ImageInputType.PIXEL_VALUES: - values_arr = values.view(3, 336, 336).permute((1, 2, 0)).numpy() + values_arr = values.squeeze(dim=0).permute((1, 2, 0)).numpy() image = Image.fromarray(values_arr, mode="RGB") fake_mm_data = ImagePixelData(image) elif config_input_type == ImageInputType.IMAGE_FEATURES: From 9a9a4e702f0e94fcdf28992b3a2d4fb9f614d8dd Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 13:54:29 +0000 Subject: [PATCH 41/45] Allow config to specify other image size for LLaVA-NeXT --- vllm/model_executor/models/llava.py | 4 ++-- vllm/model_executor/models/llava_next.py | 22 +++++++++++++--------- vllm/sequence.py | 4 ++-- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 33dda575b2af..991fc8e33b48 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -124,8 +124,8 @@ def _validate_image_data(self, data: torch.Tensor) -> torch.Tensor: self.vision_language_config.image_input_shape[1:]): raise ValueError( f"The expected image tensor shape is batch dimension plus " - f"{self.vision_language_config.image_input_shape[1:]}." - f" You supplied {data.shape}. " + f"{self.vision_language_config.image_input_shape[1:]}. " + f"You supplied {data.shape}. " f"If you are using vLLM's entrypoint, make sure your " f"supplied image input is consistent with " f"image_input_shape in engine args.") diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index 04cc59e869b8..f52540a35662 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -40,7 +40,7 @@ class LlavaNextForConditionalGeneration(LlavaForConditionalGeneration): pixel_values: For PIXEL_VALUES, expects a batch with shape [1, num_patches, 3, 336, 336]. image_features: For IMAGE_FEATURES, expects a batch with shape - [1, num_patches, 576, 1024]. + [1, num_patches, 1176, 1024]. """ def __init__(self, @@ -55,14 +55,18 @@ def __init__(self, self.image_newline = nn.Parameter( torch.empty(config.text_config.hidden_size)) - def _validate_image_data(self, data: torch.Tensor) -> torch.Tensor: - if list(data.shape[2:]) != list( - self.vision_language_config.image_input_shape[1:]): + def _validate_image_pixels(self, data: torch.Tensor) -> torch.Tensor: + _, num_channels, _, _ = self.vision_language_config.image_input_shape + + # Note that this is different from that of vLLM vision_language_config + # since the image is resized by the HuggingFace preprocessor + height = width = self.config.vision_config.image_size + + if list(data.shape[2:]) != [num_channels, height, width]: raise ValueError( - f"The expected image tensor shape is batch dimension " - f"plus num_patches plus " - f"{self.vision_language_config.image_input_shape[1:]}." 
- f" You supplied {data.shape}. " + f"The expected image tensor shape is batch dimension plus " + f"num_patches plus {[num_channels, height, width]}. " + f"You supplied {data.shape}. " f"If you are using vLLM's entrypoint, make sure your " f"supplied image input is consistent with " f"image_input_shape in engine args.") @@ -93,7 +97,7 @@ def _parse_and_validate_image_input( return LlavaNextImagePixelInputs( type="pixel_values", - data=self._validate_image_data(pixel_values), + data=self._validate_image_pixels(pixel_values), image_sizes=image_sizes, ) diff --git a/vllm/sequence.py b/vllm/sequence.py index 6aa589187b19..b80b3f0da54a 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -416,8 +416,8 @@ def get_input_kwargs( self, model_config: ModelConfig, vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: # Temporary patch to make LLaVA-NeXT usable - # When image size is (336, 336), the feature size is fixed to 1176 - image = self.image.resize((336, 336)) + _, _, h, w = vlm_config.image_input_shape + image = self.image.resize((w, h)) image_processor = self._get_image_processor(model_config, vlm_config) if image_processor is None: From 176ad2cc086d21e964287dc15d4bbc4aed7cb8ab Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 18 Apr 2024 14:54:27 +0000 Subject: [PATCH 42/45] Improve error message to show the expected `image_feature_size` --- vllm/model_executor/models/llava.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 991fc8e33b48..b794b43aeda8 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -53,7 +53,13 @@ def _merge_vision_embeddings(input_ids: torch.Tensor, image_token_id: int) -> torch.Tensor: """In place merges in vision_embeddings with inputs_embeds.""" mask = (input_ids == image_token_id) - inputs_embeds[mask] = vision_embeddings.view(-1, + + image_feature_size = vision_embeddings.shape[0] * vision_embeddings.shape[1] + if mask.sum() != image_feature_size: + raise ValueError(f"image_feature_size should be {image_feature_size}, " + f"but found: {mask.sum()}") + + inputs_embeds[mask] = vision_embeddings.view(image_feature_size, vision_embeddings.shape[-1]) return inputs_embeds From 91ea0440390675869e24ed8678c9398bb04c6a37 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 19 Apr 2024 03:59:53 +0000 Subject: [PATCH 43/45] Fix dtype mismatch in `multi_modal_kwargs` --- vllm/worker/model_runner.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index db8080b42165..b62f1fd26da9 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -400,8 +400,9 @@ def _prepare_prompt( use_cuda_graph=False, ) + model_dtype = self.model_config.dtype multi_modal_kwargs = { - k: torch.cat(v, dim=0).to(self.device) + k: torch.cat(v, dim=0).to(self.device, dtype=model_dtype) for k, v in multi_modal_kwargs_list.items() } From cb197433a043f3bab32f1bb8eb9c6ba870d06d0f Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 19 Apr 2024 07:49:25 +0000 Subject: [PATCH 44/45] Fix LLaVA example and test w.r.t. 
image processing refactor - Note that we now load the images directly instead of from `.pt` files --- examples/llava_example.py | 6 +-- tests/conftest.py | 22 ++++----- tests/models/test_llava.py | 93 ++++++++++++++++++++++++-------------- 3 files changed, 70 insertions(+), 51 deletions(-) diff --git a/examples/llava_example.py b/examples/llava_example.py index 1c25d3fe9491..4da365323523 100644 --- a/examples/llava_example.py +++ b/examples/llava_example.py @@ -18,16 +18,13 @@ def run_llava_pixel_values(): image_token_id=32000, image_input_shape="1,3,336,336", image_feature_size=576, - no_image_processor=True, ) prompt = "" * 576 + ( "\nUSER: What is the content of this image?\nASSISTANT:") # This should be provided by another online or offline component. - image_tensor: torch.Tensor = torch.load("images/stop_sign_pixel_values.pt") - image_arr = image_tensor.view(3, 336, 336).permute((1, 2, 0)).numpy() - image = Image.fromarray(image_arr, mode="RGB") + image = Image.open("images/stop_sign.jpg") outputs = llm.generate(prompt, multi_modal_datas=ImagePixelData(image)) for o in outputs: @@ -42,7 +39,6 @@ def run_llava_image_features(): image_token_id=32000, image_input_shape="1,576,1024", image_feature_size=576, - no_image_processor=True, ) prompt = "" * 576 + ( diff --git a/tests/conftest.py b/tests/conftest.py index acd1caa30b75..628df9aaf6c3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -13,7 +13,7 @@ from vllm import LLM, SamplingParams from vllm.config import TokenizerPoolConfig, VisionLanguageConfig from vllm.distributed import destroy_model_parallel -from vllm.sequence import MultiModalData +from vllm.sequence import ImageFeatureData, ImagePixelData, MultiModalData from vllm.transformers_utils.tokenizer import get_tokenizer _TEST_DIR = os.path.dirname(__file__) @@ -21,10 +21,6 @@ _LONG_PROMPTS = [os.path.join(_TEST_DIR, "prompts", "summary.txt")] # Multi modal related -_PIXEL_VALUES_FILES = [ - os.path.join(_TEST_DIR, "images", filename) for filename in - ["stop_sign_pixel_values.pt", "cherry_blossom_pixel_values.pt"] -] _IMAGE_FEATURES_FILES = [ os.path.join(_TEST_DIR, "images", filename) for filename in ["stop_sign_image_features.pt", "cherry_blossom_image_features.pt"] @@ -37,8 +33,7 @@ "\nUSER: What's the content of the image?\nASSISTANT:", "\nUSER: What is the season?\nASSISTANT:" ] -assert len(_PIXEL_VALUES_FILES) == len(_IMAGE_FEATURES_FILES) == len( - _IMAGE_FILES) == len(_IMAGE_PROMPTS) +assert len(_IMAGE_FEATURES_FILES) == len(_IMAGE_FILES) == len(_IMAGE_PROMPTS) def _read_prompts(filename: str) -> List[str]: @@ -86,15 +81,18 @@ def hf_images() -> List[Image.Image]: @pytest.fixture() -def vllm_images(request) -> List[torch.Tensor]: +def vllm_images(request) -> List[MultiModalData]: vision_language_config = request.getfixturevalue("model_and_config")[1] if vision_language_config.image_input_type == ( VisionLanguageConfig.ImageInputType.IMAGE_FEATURES): - filenames = _IMAGE_FEATURES_FILES + return [ + ImageFeatureData(torch.load(filename)) + for filename in _IMAGE_FEATURES_FILES + ] else: - filenames = _PIXEL_VALUES_FILES - - return [torch.load(filename) for filename in filenames] + return [ + ImagePixelData(Image.open(filename)) for filename in _IMAGE_FILES + ] @pytest.fixture() diff --git a/tests/models/test_llava.py b/tests/models/test_llava.py index 18d608af976b..2e2a6faa18b5 100644 --- a/tests/models/test_llava.py +++ b/tests/models/test_llava.py @@ -8,36 +8,47 @@ from transformers import AutoTokenizer from vllm.config import VisionLanguageConfig -from 
vllm.sequence import ImagePixelData def iter_llava_configs(model_name: str): - for input_type, input_shape in [ - (VisionLanguageConfig.ImageInputType.PIXEL_VALUES, (1, 3, 336, 336)), - (VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, (1, 576, 1024)), - ]: - yield (model_name, - VisionLanguageConfig(image_input_type=input_type, - image_feature_size=576, - image_token_id=32000, - image_input_shape=input_shape, - image_processor=None, - image_processor_revision=None)) + image_hw_to_feature_size = { + (336, 336): 576, + } + + for (h, w), f in image_hw_to_feature_size.items(): + for input_type, input_shape in [ + (VisionLanguageConfig.ImageInputType.PIXEL_VALUES, (1, 3, h, w)), + (VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, (1, f, 1024)), + ]: + yield (model_name, + VisionLanguageConfig(image_input_type=input_type, + image_feature_size=f, + image_token_id=32000, + image_input_shape=input_shape, + image_processor=model_name, + image_processor_revision=None)) def iter_llava_next_configs(model_name: str): - for input_type, input_shape in [ - # `vision_config` on HuggingFace only supports `image_size=336` - (VisionLanguageConfig.ImageInputType.PIXEL_VALUES, (1, 3, 336, 336)), - (VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, (1, 576, 1024)), - ]: - yield (model_name, - VisionLanguageConfig(image_input_type=input_type, - image_feature_size=576, - image_token_id=64000, - image_input_shape=input_shape, - image_processor=None, - image_processor_revision=None)) + image_hw_to_feature_size = { + (336, 336): 1176, + (672, 672): 2928, + (1344, 336): 1944, + (336, 1344): 1890, + } + + for (h, w), f in image_hw_to_feature_size.items(): + for input_type, input_shape in [ + (VisionLanguageConfig.ImageInputType.PIXEL_VALUES, (1, 3, h, w)), + (VisionLanguageConfig.ImageInputType.IMAGE_FEATURES, (1, f, 1024)), + ]: + yield (model_name, + VisionLanguageConfig(image_input_type=input_type, + image_feature_size=f, + image_token_id=64000, + image_input_shape=input_shape, + image_processor=model_name, + image_processor_revision=None)) model_and_vl_config = [ @@ -99,17 +110,27 @@ def test_models(hf_runner, vllm_runner, hf_image_prompts, hf_images, """Inference result should be the same between hf and vllm. All the image fixtures for the test is under tests/images. - For huggingface runner, we provide the raw images as input. - For vllm runner, we provide image tensors and corresponding + For huggingface runner, we provide the PIL images as input. + For vllm runner, we provide MultiModalData objects and corresponding vision language config as input. Note, the text input is also adjusted to abide by vllm contract. The text output is sanitized to be able to compare with hf. 
""" model_id, vision_language_config = model_and_config + hf_model = hf_runner(model_id, dtype=dtype) - hf_outputs = hf_model.generate_greedy(hf_image_prompts, - max_tokens, - images=hf_images) + _, vision_language_config = model_and_config + if vision_language_config.image_input_type == ( + VisionLanguageConfig.ImageInputType.IMAGE_FEATURES): + # HuggingFace does not support image feature input + hf_outputs = [None] * len(hf_image_prompts) + else: + _, _, h, w = vision_language_config.image_input_shape + hf_outputs = hf_model.generate_greedy( + hf_image_prompts, + max_tokens, + # To be compatible with the patch for LLaVA-NeXT + images=[im.resize((w, h)) for im in hf_images]) del hf_model vllm_model = vllm_runner(model_id, @@ -117,19 +138,23 @@ def test_models(hf_runner, vllm_runner, hf_image_prompts, hf_images, worker_use_ray=worker_use_ray, enforce_eager=True, **as_dict(vision_language_config)) - vllm_outputs = vllm_model.generate_greedy( - vllm_image_prompts, - max_tokens, - multi_modal_datas=[ImagePixelData(image) for image in vllm_images]) + vllm_outputs = vllm_model.generate_greedy(vllm_image_prompts, + max_tokens, + multi_modal_datas=vllm_images) del vllm_model gc.collect() torch.cuda.empty_cache() for i in range(len(hf_image_prompts)): - hf_output_ids, hf_output_str = hf_outputs[i] + hf_output = hf_outputs[i] + if hf_output is None: + continue + + hf_output_ids, hf_output_str = hf_output vllm_output_ids, vllm_output_str = sanitize_vllm_output( vllm_outputs[i], vision_language_config, model_id) + print(f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") assert hf_output_str == vllm_output_str, ( f"Test{i}:\nHF: {hf_output_str!r}\nvLLM: {vllm_output_str!r}") assert hf_output_ids == vllm_output_ids, ( From f882d99e528fd55062ab7012918ba6a0067f1bb5 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Fri, 19 Apr 2024 10:34:52 +0000 Subject: [PATCH 45/45] Fix circular import and set return type - These changes are propagated to the child PRs --- vllm/model_executor/models/llava.py | 2 +- vllm/sequence.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index b794b43aeda8..12b18631a1d1 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -40,7 +40,7 @@ def __init__(self, vision_hidden_size: int, text_hidden_size: int, text_hidden_size, bias=True) - def forward(self, image_features: torch.Tensor): + def forward(self, image_features: torch.Tensor) -> torch.Tensor: hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) diff --git a/vllm/sequence.py b/vllm/sequence.py index b80b3f0da54a..203ff4637ed8 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -10,13 +10,13 @@ from PIL import Image from vllm.block import LogicalTokenBlock -from vllm.config import ModelConfig, VisionLanguageConfig from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.sampling_params import SamplingParams from vllm.transformers_utils.image_processor import cached_get_image_processor if TYPE_CHECKING: + from vllm.config import ModelConfig, VisionLanguageConfig from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics logger = init_logger(__name__) @@ -385,8 +385,8 @@ class MultiModalData(ABC): @abstractmethod def get_input_kwargs( - self, model_config: ModelConfig, - vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: + self, 
model_config: "ModelConfig", + vlm_config: "VisionLanguageConfig") -> Dict[str, torch.Tensor]: """Returns a dictionary which are passed as keyword arguments to :meth:`torch.nn.Module.forward`. """ @@ -401,8 +401,8 @@ def __init__(self, image: Image.Image) -> None: self.image = image - def _get_image_processor(self, model_config: ModelConfig, - vlm_config: VisionLanguageConfig): + def _get_image_processor(self, model_config: "ModelConfig", + vlm_config: "VisionLanguageConfig"): if vlm_config is None or vlm_config.image_processor is None: return None @@ -413,8 +413,8 @@ def _get_image_processor(self, model_config: ModelConfig, ) def get_input_kwargs( - self, model_config: ModelConfig, - vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: + self, model_config: "ModelConfig", + vlm_config: "VisionLanguageConfig") -> Dict[str, torch.Tensor]: # Temporary patch to make LLaVA-NeXT usable _, _, h, w = vlm_config.image_input_shape image = self.image.resize((w, h)) @@ -444,8 +444,8 @@ def __init__(self, image_features: torch.Tensor) -> None: self.image_features = image_features def get_input_kwargs( - self, model_config: ModelConfig, - vlm_config: VisionLanguageConfig) -> Dict[str, torch.Tensor]: + self, model_config: "ModelConfig", + vlm_config: "VisionLanguageConfig") -> Dict[str, torch.Tensor]: return {"image_features": self.image_features}