[Frontend] Add readiness and liveness endpoints to OpenAI API server #7078

Closed
10 changes: 10 additions & 0 deletions tests/entrypoints/openai/test_basic.py
@@ -59,3 +59,13 @@ async def test_log_metrics(client: openai.AsyncOpenAI):
     response = requests.get(base_url + "/metrics")
 
     assert response.status_code == HTTPStatus.OK
+
+
+@pytest.mark.asyncio
+async def test_get_readiness_ok(client: openai.AsyncOpenAI):
+    """Test the readiness route /ready when the model is fully loaded"""
+    base_url = str(client.base_url)[:-3].strip("/")
+
+    response = requests.get(base_url + "/ready")
+
+    assert response.status_code == HTTPStatus.OK
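For reference, a deployment script or init container could poll the new route before sending traffic to the server. The sketch below is not part of this PR; wait_until_ready is a hypothetical helper, and it assumes only what the endpoint added in api_server.py below provides: GET /ready returns 200 once the model is loaded.

import time

import requests


def wait_until_ready(base_url: str, timeout: float = 300.0, interval: float = 2.0) -> None:
    """Poll GET /ready until it returns 200 or the timeout expires."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            if requests.get(base_url + "/ready", timeout=5).status_code == 200:
                return
        except requests.ConnectionError:
            # The HTTP server may not be listening yet during startup
            pass
        time.sleep(interval)
    raise TimeoutError(f"{base_url} did not become ready within {timeout} seconds")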
25 changes: 24 additions & 1 deletion vllm/entrypoints/openai/api_server.py
@@ -9,7 +9,7 @@
 
 import fastapi
 import uvicorn
-from fastapi import APIRouter, Request
+from fastapi import APIRouter, HTTPException, Request
 from fastapi.exceptions import RequestValidationError
 from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import JSONResponse, Response, StreamingResponse
@@ -90,6 +90,29 @@ async def health() -> Response:
     return Response(status_code=200)
 
 
+@router.get(
+    "/ready",
+    name="readiness",
+    tags=["technical"],
+)
+async def get_readiness() -> Response:
+    """Readiness probe for k8s"""
+    try:
+        model_executor = openai_serving_chat.engine.engine.model_executor
+        model_runner = model_executor.driver_worker.model_runner
+
+        # Check whether the model weights have been loaded into GPU memory
+        model_weights = model_runner.model_memory_usage
+
+        # Check whether the KV cache has been set up
+        num_cpu_blocks = model_runner.num_cpu_blocks
+        num_gpu_blocks = model_runner.num_gpu_blocks
+
+        if model_weights > 0 and num_cpu_blocks > 0 and num_gpu_blocks > 0:
+            return Response(status_code=200)
+    except AttributeError:
+        # Engine internals may not exist until loading has progressed
+        pass
+    raise HTTPException(status_code=500,
+                        detail="Model not loaded yet or KV cache not set up yet")
+
+
 @router.post("/tokenize")
 async def tokenize(request: TokenizeRequest):
     generator = await openai_serving_tokenization.create_tokenize(request)
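To illustrate the intended use, the two endpoints could be wired into a pod spec as probes with the official Kubernetes Python client. This is a sketch, not part of the PR: the port (8000, vLLM's default), the image name, and the probe timings are assumptions, and the pre-existing /health route is used as the liveness probe.

from kubernetes import client

# Assumed values: vLLM's default port 8000 and the vllm/vllm-openai image.
container = client.V1Container(
    name="vllm-openai",
    image="vllm/vllm-openai:latest",
    ports=[client.V1ContainerPort(container_port=8000)],
    # Liveness: the existing /health endpoint answers once the app is up.
    liveness_probe=client.V1Probe(
        http_get=client.V1HTTPGetAction(path="/health", port=8000),
        period_seconds=10,
        failure_threshold=3,
    ),
    # Readiness: the new /ready endpoint returns 200 only after the model
    # weights are loaded and the KV cache has been allocated.
    readiness_probe=client.V1Probe(
        http_get=client.V1HTTPGetAction(path="/ready", port=8000),
        initial_delay_seconds=10,
        period_seconds=5,
        failure_threshold=30,
    ),
)

A failing readiness probe only removes the pod from the service endpoints, so a generous failure_threshold simply tolerates long model load times, whereas liveness failures would restart the container.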
2 changes: 1 addition & 1 deletion vllm/entrypoints/openai/protocol.py
@@ -719,4 +719,4 @@ class DetokenizeRequest(OpenAIBaseModel):


 class DetokenizeResponse(OpenAIBaseModel):
-    prompt: str
+    prompt: str
@DarkLight1337 (Member) commented on Aug 5, 2024:

Please avoid deleting the last line here. (Since otherwise, the file remains unchanged)