[Bugfix] fix OpenAI API server startup with --disable-frontend-multiprocessing (vllm-project#8537)

Signed-off-by: Sumit Dubey <sumit.dubey2@ibm.com>
dtrifiro authored and sumitd2 committed Nov 14, 2024
1 parent c7136b0 commit 23ea71c
Showing 2 changed files with 63 additions and 5 deletions.
58 changes: 57 additions & 1 deletion tests/entrypoints/openai/test_basic.py
@@ -1,4 +1,5 @@
from http import HTTPStatus
+from typing import List

import openai
import pytest
@@ -12,8 +13,44 @@
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"


+@pytest.fixture(scope='module')
+def server_args(request: pytest.FixtureRequest) -> List[str]:
+    """ Provide extra arguments to the server via indirect parametrization
+    Usage:
+    >>> @pytest.mark.parametrize(
+    >>>     "server_args",
+    >>>     [
+    >>>         ["--disable-frontend-multiprocessing"],
+    >>>         [
+    >>>             "--model=NousResearch/Hermes-3-Llama-3.1-70B",
+    >>>             "--enable-auto-tool-choice",
+    >>>         ],
+    >>>     ],
+    >>>     indirect=True,
+    >>> )
+    >>> def test_foo(server, client):
+    >>>     ...
+    This will run `test_foo` twice with servers with:
+    - `--disable-frontend-multiprocessing`
+    - `--model=NousResearch/Hermes-3-Llama-3.1-70B --enable-auto-tool-choice`.
+    """
+    if not hasattr(request, "param"):
+        return []
+
+    val = request.param
+
+    if isinstance(val, str):
+        return [val]
+
+    return request.param
+
+
@pytest.fixture(scope="module")
-def server():
+def server(server_args):
    args = [
        # use half precision for speed and memory savings in CI environment
        "--dtype",
@@ -23,6 +60,7 @@ def server():
        "--enforce-eager",
        "--max-num-seqs",
        "128",
+        *server_args,
    ]

    with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
@@ -35,6 +73,15 @@ async def client(server):
        yield async_client


+@pytest.mark.parametrize(
+    "server_args",
+    [
+        pytest.param([], id="default-frontend-multiprocessing"),
+        pytest.param(["--disable-frontend-multiprocessing"],
+                     id="disable-frontend-multiprocessing")
+    ],
+    indirect=True,
+)
@pytest.mark.asyncio
async def test_show_version(client: openai.AsyncOpenAI):
    base_url = str(client.base_url)[:-3].strip("/")
@@ -45,6 +92,15 @@ async def test_show_version(client: openai.AsyncOpenAI):
    assert response.json() == {"version": VLLM_VERSION}


+@pytest.mark.parametrize(
+    "server_args",
+    [
+        pytest.param([], id="default-frontend-multiprocessing"),
+        pytest.param(["--disable-frontend-multiprocessing"],
+                     id="disable-frontend-multiprocessing")
+    ],
+    indirect=True,
+)
@pytest.mark.asyncio
async def test_check_health(client: openai.AsyncOpenAI):
    base_url = str(client.base_url)[:-3].strip("/")
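The new `server_args` fixture above relies on pytest's indirect parametrization: with `indirect=True`, each parametrized value is delivered to the fixture as `request.param` instead of being passed straight to the test function, so the fixture can turn it into extra CLI arguments for the server. A minimal, self-contained sketch of that pattern, independent of vLLM (the fixture and test names here are illustrative, not part of the commit):

from typing import List

import pytest


@pytest.fixture
def extra_args(request: pytest.FixtureRequest) -> List[str]:
    # With indirect=True, the parametrized value arrives here as
    # request.param; tests that don't parametrize get an empty list.
    return list(getattr(request, "param", []))


@pytest.mark.parametrize(
    "extra_args",
    [
        pytest.param([], id="defaults"),
        pytest.param(["--disable-frontend-multiprocessing"], id="no-mp"),
    ],
    indirect=True,
)
def test_builds_command_line(extra_args: List[str]):
    # Each parametrized case sees the fixture value derived from its param.
    cmd = ["server", "--dtype", "bfloat16", *extra_args]
    assert cmd[:3] == ["server", "--dtype", "bfloat16"]

Each `pytest.param(..., id=...)` entry produces a separately named test case, which is how the commit runs the endpoint tests once with and once without `--disable-frontend-multiprocessing`.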
10 changes: 6 additions & 4 deletions vllm/entrypoints/openai/api_server.py
@@ -537,8 +537,11 @@ async def run_server(args, **uvicorn_kwargs) -> None:
        raise KeyError(f"invalid tool call parser: {args.tool_call_parser} "
                       f"(chose from {{ {','.join(valide_tool_parses)} }})")

-    temp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    temp_socket.bind(("", args.port))
+    # workaround to make sure that we bind the port before the engine is set up.
+    # This avoids race conditions with ray.
+    # see https://github.com/vllm-project/vllm/issues/8204
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.bind(("", args.port))

    def signal_handler(*_) -> None:
        # Interrupt server on sigterm while initializing
@@ -552,8 +555,6 @@ def signal_handler(*_) -> None:
    model_config = await engine_client.get_model_config()
    init_app_state(engine_client, model_config, app.state, args)

-    temp_socket.close()
-
    shutdown_task = await serve_http(
        app,
        host=args.host,
@@ -564,6 +565,7 @@ def signal_handler(*_) -> None:
        ssl_certfile=args.ssl_certfile,
        ssl_ca_certs=args.ssl_ca_certs,
        ssl_cert_reqs=args.ssl_cert_reqs,
+        fd=sock.fileno(),
        **uvicorn_kwargs,
    )

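The api_server.py change binds the listening socket before the engine is initialized and then hands the already-bound file descriptor to the HTTP server via `fd=sock.fileno()`, so the port stays reserved throughout slow startup and cannot be grabbed by another process (the Ray race referenced in vllm-project/vllm#8204). A rough standalone sketch of the same pattern using plain uvicorn rather than vLLM's `serve_http` wrapper (the app, port, and setup step below are placeholders, not vLLM code):

import socket

import uvicorn


async def app(scope, receive, send):
    # Minimal ASGI app that answers every HTTP request with 200 OK.
    assert scope["type"] == "http"
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})


def main(port: int = 8000) -> None:
    # Reserve the port up front, before any slow initialization, so nothing
    # else can bind it in the meantime.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("", port))

    # ... expensive engine/model setup would happen here ...

    # Hand the already-bound descriptor to uvicorn instead of letting it
    # bind the port itself (uvicorn's `fd` option).
    config = uvicorn.Config(app, fd=sock.fileno())
    uvicorn.Server(config).run()


if __name__ == "__main__":
    main()

Compared with the previous workaround, which bound a temporary socket and closed it just before uvicorn re-bound the port, passing the descriptor directly leaves no window in which the port is unbound.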
