feat: add think display for volcengine and generic openapi (#13234)
Signed-off-by: xhe <xw897002528@gmail.com>
xhebox authored Feb 6, 2025
1 parent a6a2503 commit 5a685f7
Showing 2 changed files with 53 additions and 3 deletions.
@@ -473,6 +473,8 @@ def get_tool_call(tool_call_id: str):

finish_reason = None  # The default value of finish_reason is None
message_id, usage = None, None
is_reasoning_started = False
is_reasoning_started_tag = False
for chunk in response.iter_lines(decode_unicode=True, delimiter=delimiter):
    chunk = chunk.strip()
    if chunk:
@@ -514,6 +516,33 @@ def get_tool_call(tool_call_id: str):
delta = choice["delta"]
delta_content = delta.get("content")

if not is_reasoning_started_tag and delta_content and "<think>" in delta_content:
    is_reasoning_started_tag = True
    delta_content = "> 💭 " + delta_content.replace("<think>", "")
elif is_reasoning_started_tag and delta_content and "</think>" in delta_content:
    delta_content = delta_content.replace("</think>", "") + "\n\n"
    is_reasoning_started_tag = False
elif is_reasoning_started_tag and delta_content:
    if "\n\n" in delta_content:
        delta_content = delta_content.replace("\n\n", "\n> ")
    elif "\n" in delta_content:
        delta_content = delta_content.replace("\n", "\n> ")

reasoning_content = delta.get("reasoning_content")
if reasoning_content:
    if not is_reasoning_started:
        delta_content = "> 💭 " + reasoning_content
        is_reasoning_started = True
    elif "\n\n" in reasoning_content:
        delta_content = reasoning_content.replace("\n\n", "\n> ")
    elif "\n" in reasoning_content:
        delta_content = reasoning_content.replace("\n", "\n> ")
    else:
        delta_content = reasoning_content
elif is_reasoning_started and delta_content:
    # If we were in reasoning mode but are now getting regular content,
    # add \n\n to close the reasoning block.
    delta_content = "\n\n" + delta_content
    is_reasoning_started = False

assistant_message_tool_calls = None

if "tool_calls" in delta and credentials.get("function_calling_type", "no_call") == "tool_call":
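Illustration (not part of the commit): a minimal sketch of the <think>-tag rewriting above, lifted out of the streaming loop. It assumes, as the substring checks in the diff do, that a tag never arrives split across two chunks; render_think and the sample chunks are hypothetical names, not code from this repository.

# Hypothetical sketch of the <think> tag rewriting added above.
# Assumes a tag is never split across chunk boundaries.
def render_think(chunks):
    is_reasoning_started_tag = False
    for delta_content in chunks:
        if not is_reasoning_started_tag and "<think>" in delta_content:
            is_reasoning_started_tag = True
            delta_content = "> 💭 " + delta_content.replace("<think>", "")
        elif is_reasoning_started_tag and "</think>" in delta_content:
            delta_content = delta_content.replace("</think>", "") + "\n\n"
            is_reasoning_started_tag = False
        elif is_reasoning_started_tag:
            # Keep every reasoning line inside the markdown blockquote.
            if "\n\n" in delta_content:
                delta_content = delta_content.replace("\n\n", "\n> ")
            elif "\n" in delta_content:
                delta_content = delta_content.replace("\n", "\n> ")
        yield delta_content

print("".join(render_think(["<think>step one", "\nstep two", "</think>", "answer"])))
# > 💭 step one
# > step two
#
# answer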
27 changes: 24 additions & 3 deletions api/core/model_runtime/model_providers/volcengine_maas/llm/llm.py
@@ -247,15 +247,36 @@ def _generate_v3(
req_params["tools"] = tools

def _handle_stream_chat_response(chunks: Generator[ChatCompletionChunk]) -> Generator:
    is_reasoning_started = False
    for chunk in chunks:
        content = ""
        if chunk.choices:
            if getattr(chunk.choices[0].delta, "reasoning_content", None):
                delta_content = chunk.choices[0].delta.reasoning_content
                if not is_reasoning_started:
                    is_reasoning_started = True
                    delta_content = "> 💭 " + delta_content

                if "\n\n" in delta_content:
                    delta_content = delta_content.replace("\n\n", "\n> ")
                elif "\n" in delta_content:
                    delta_content = delta_content.replace("\n", "\n> ")

                content = delta_content
            elif is_reasoning_started:
                content = "\n\n" + (chunk.choices[0].delta.content or "")
                is_reasoning_started = False
            else:
                content = chunk.choices[0].delta.content or ""
        yield LLMResultChunk(
            model=model,
            prompt_messages=prompt_messages,
            delta=LLMResultChunkDelta(
                index=0,
-               message=AssistantPromptMessage(
-                   content=chunk.choices[0].delta.content if chunk.choices else "", tool_calls=[]
-               ),
+               message=AssistantPromptMessage(content=content, tool_calls=[]),
                usage=self._calc_response_usage(
                    model=model,
                    credentials=credentials,
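Illustration (not part of the commit): the same branching as the rewritten _handle_stream_chat_response, exercised against hypothetical stand-ins for the SDK's streaming chunk objects. FakeDelta, FakeChoice, and FakeChunk are assumptions that model only the fields the handler reads.

# Hypothetical stand-ins for the SDK chunk objects.
from dataclasses import dataclass, field

@dataclass
class FakeDelta:
    content: str = ""
    reasoning_content: str | None = None

@dataclass
class FakeChoice:
    delta: FakeDelta

@dataclass
class FakeChunk:
    choices: list = field(default_factory=list)

def render(chunks):
    # Same branching as _handle_stream_chat_response, minus the
    # LLMResultChunk wrapping: reasoning deltas become a "> 💭" blockquote,
    # and the first regular delta closes it with a blank line.
    is_reasoning_started = False
    for chunk in chunks:
        content = ""
        if chunk.choices:
            delta = chunk.choices[0].delta
            if delta.reasoning_content:
                delta_content = delta.reasoning_content
                if not is_reasoning_started:
                    is_reasoning_started = True
                    delta_content = "> 💭 " + delta_content
                if "\n\n" in delta_content:
                    delta_content = delta_content.replace("\n\n", "\n> ")
                elif "\n" in delta_content:
                    delta_content = delta_content.replace("\n", "\n> ")
                content = delta_content
            elif is_reasoning_started:
                content = "\n\n" + (delta.content or "")
                is_reasoning_started = False
            else:
                content = delta.content or ""
        yield content

stream = [
    FakeChunk([FakeChoice(FakeDelta(reasoning_content="comparing options"))]),
    FakeChunk([FakeChoice(FakeDelta(reasoning_content="\npicking the best"))]),
    FakeChunk([FakeChoice(FakeDelta(content="Here is the answer."))]),
]
print("".join(render(stream)))
# > 💭 comparing options
# > picking the best
#
# Here is the answer.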
