Commit message:

* update
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* update
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)
* update
* update
* [pre-commit.ci] auto fixes from pre-commit.com hooks (for more information, see https://pre-commit.ci)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
1 parent 5e6f5f3 · commit a48c67e · 6 changed files with 143 additions and 28 deletions.
Empty file.
@@ -0,0 +1,47 @@

import logging

import requests


class Client:
    def __init__(self):
        pass


class vLLMClient(Client):
    def __init__(self, model: str, base_url="http://localhost:8000/endpoint"):
        # Imported lazily so transformers is only required when this
        # client is actually instantiated.
        from transformers import AutoTokenizer

        super().__init__()
        self.tokenizer = AutoTokenizer.from_pretrained(model)
        self.context = []
        self.base_url = base_url

    def chat(self, prompt: str, keep_context=False):
        new_msg = {"role": "user", "content": prompt}
        if keep_context:
            self.context.append(new_msg)
            messages = self.context
        else:
            messages = [new_msg]

        logging.info(messages)
        # Render the conversation with the model's chat template into a
        # plain prompt string before posting it to the server.
        chat = self.tokenizer.apply_chat_template(messages, tokenize=False)
        headers = {
            "accept": "application/json",
            "Content-Type": "application/json",
        }
        data = {
            "prompt": chat,
            "temperature": 0.8,
            "top_p": 1,
            "max_tokens": 500,
            "stop": [],
        }

        response = requests.post(self.base_url, headers=headers, json=data).json()
        if keep_context:
            self.context.append(
                {"role": "assistant", "content": response["outputs"][0]["text"]}
            )
        return response
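For context, a minimal usage sketch of the client above. This is hypothetical and not part of the commit: the model name is illustrative, and it assumes a server is already listening at the default base_url and returns the {"outputs": [{"text": ...}]} shape that chat() parses.

# Hypothetical usage, not part of this commit. Assumes a server is running
# at the default base_url and the model's tokenizer is downloadable from
# the Hugging Face Hub.
client = vLLMClient("meta-llama/Llama-2-7b-chat-hf")

# One-off question; no conversation history is kept.
reply = client.chat("Write a haiku about GPUs.")
print(reply["outputs"][0]["text"])

# Multi-turn conversation; turns accumulate in client.context.
client.chat("My name is Ada.", keep_context=True)
client.chat("What is my name?", keep_context=True)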
@@ -1,46 +1,65 @@
-import os
-from typing import List
+import logging
+from typing import Any, List, Optional
 
-from fastapi import FastAPI
 from pydantic import BaseModel
-from vllm import LLM, SamplingParams
 
-tensor_parallel_size = int(os.environ.get("DEVICES", "1"))
-print("tensor_parallel_size: ", tensor_parallel_size)
+from fastserve.core import FastServe
 
-llm = LLM("meta-llama/Llama-2-7b-hf", tensor_parallel_size=tensor_parallel_size)
+logger = logging.getLogger(__name__)
 
 
 class PromptRequest(BaseModel):
-    prompt: str
-    temperature: float = 1
+    prompt: str = "Write a python function to resize image to 224x224"
+    temperature: float = 0.8
+    top_p: float = 1.0
     max_tokens: int = 200
     stop: List[str] = []
 
 
 class ResponseModel(BaseModel):
     prompt: str
-    prompt_token_ids: List  # The token IDs of the prompt.
-    outputs: List[str]  # The output sequences of the request.
+    prompt_token_ids: Optional[List] = None  # The token IDs of the prompt.
+    text: str  # The output sequences of the request.
    finished: bool  # Whether the whole request is finished.
 
 
-app = FastAPI()
-
+class ServeVLLM(FastServe):
+    def __init__(
+        self,
+        model,
+        batch_size=1,
+        timeout=0.0,
+        *args,
+        **kwargs,
+    ):
+        from vllm import LLM
+
+        self.llm = LLM(model)
+        self.args = args
+        self.kwargs = kwargs
+        super().__init__(
+            batch_size,
+            timeout,
+            input_schema=PromptRequest,
+            # response_schema=ResponseModel,
+        )
+
+    def __call__(self, request: PromptRequest) -> Any:
+        from vllm import SamplingParams
+
+        sampling_params = SamplingParams(
+            temperature=request.temperature,
+            top_p=request.top_p,
+            max_tokens=request.max_tokens,
+        )
+        result = self.llm.generate(request.prompt, sampling_params=sampling_params)
+        logger.info(result)
+        return result
 
-@app.post("/serve", response_model=ResponseModel)
-def serve(request: PromptRequest):
-    sampling_params = SamplingParams(
-        max_tokens=request.max_tokens,
-        temperature=request.temperature,
-        stop=request.stop,
-    )
+    def handle(self, batch: List[PromptRequest]) -> List:
+        responses = []
+        for request in batch:
+            output = self(request)
+            responses.extend(output)
 
-    result = llm.generate(request.prompt, sampling_params=sampling_params)[0]
-    response = ResponseModel(
-        prompt=request.prompt,
-        prompt_token_ids=result.prompt_token_ids,
-        outputs=result.outputs,
-        finished=result.finished,
-    )
-    return response
+        return responses
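A launch sketch for the new class, again hypothetical rather than part of this diff: run_server() is assumed to come from the FastServe base class (its definition is not shown here), and the model name is only an example.

# Hypothetical launch script, not part of this commit.
# run_server() is assumed to be provided by the FastServe base class.
if __name__ == "__main__":
    serve = ServeVLLM("meta-llama/Llama-2-7b-hf")
    serve.run_server()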