Commit

chore:improve thinking display for llm from xinference and ollama pro…
leslie2046 authored Feb 7, 2025
1 parent 1caa578 commit 3f42fab
Showing 2 changed files with 3 additions and 23 deletions.
10 changes: 1 addition & 9 deletions api/core/model_runtime/model_providers/ollama/llm/llm.py
@@ -314,7 +314,6 @@ def _handle_generate_stream_response(
"""
full_text = ""
chunk_index = 0
is_reasoning_started = False

def create_final_llm_result_chunk(
index: int, message: AssistantPromptMessage, finish_reason: str
@@ -368,14 +367,7 @@ def create_final_llm_result_chunk(

             # transform assistant message to prompt message
             text = chunk_json["response"]
-            if "<think>" in text:
-                is_reasoning_started = True
-                text = text.replace("<think>", "> 💭 ")
-            elif "</think>" in text:
-                is_reasoning_started = False
-                text = text.replace("</think>", "") + "\n\n"
-            elif is_reasoning_started:
-                text = text.replace("\n", "\n> ")
+            text = self._wrap_thinking_by_tag(text)
 
             assistant_prompt_message = AssistantPromptMessage(content=text)
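Both call sites now delegate to a shared helper that is not shown in this diff. Below is a hedged sketch of what `_wrap_thinking_by_tag` plausibly does, reconstructed from the inline logic removed above; the method body, its location (presumably a common base class shared by both providers), and the `_is_thinking` flag name are assumptions, not code from this commit.

# Hedged reconstruction, inferred from the deleted inline logic above.
# Not the actual implementation shipped in the codebase.
def _wrap_thinking_by_tag(self, text: str) -> str:
    """Render a streamed <think>...</think> span as a Markdown blockquote."""
    if "<think>" in text:
        # Opening tag: begin the quoted reasoning block.
        self._is_thinking = True
        return text.replace("<think>", "> 💭 ")
    if "</think>" in text:
        # Closing tag: end the reasoning block with a blank line.
        self._is_thinking = False
        return text.replace("</think>", "") + "\n\n"
    if getattr(self, "_is_thinking", False):
        # Mid-reasoning chunk: keep each new line inside the blockquote.
        return text.replace("\n", "\n> ")
    return text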
16 changes: 2 additions & 14 deletions api/core/model_runtime/model_providers/xinference/llm/llm.py
@@ -1,4 +1,3 @@
-import re
 from collections.abc import Generator, Iterator
 from typing import Optional, cast
 
@@ -636,16 +635,13 @@ def _handle_chat_stream_response(
         handle stream chat generate response
         """
         full_response = ""
-        is_reasoning_started_tag = False
         for chunk in resp:
             if len(chunk.choices) == 0:
                 continue
             delta = chunk.choices[0]
             if delta.finish_reason is None and (delta.delta.content is None or delta.delta.content == ""):
                 continue
-            delta_content = delta.delta.content
-            if not delta_content:
-                delta_content = ""
+            delta_content = delta.delta.content or ""
             # check if there is a tool call in the response
             function_call = None
             tool_calls = []
@@ -658,15 +654,7 @@
             if function_call:
                 assistant_message_tool_calls += [self._extract_response_function_call(function_call)]
 
-            if not is_reasoning_started_tag and "<think>" in delta_content:
-                is_reasoning_started_tag = True
-                delta_content = "> 💭 " + delta_content.replace("<think>", "")
-            elif is_reasoning_started_tag and "</think>" in delta_content:
-                delta_content = delta_content.replace("</think>", "") + "\n\n"
-                is_reasoning_started_tag = False
-            elif is_reasoning_started_tag:
-                if "\n" in delta_content:
-                    delta_content = re.sub(r"\n(?!(>|\n))", "\n> ", delta_content)
+            delta_content = self._wrap_thinking_by_tag(delta_content)
             # transform assistant message to prompt message
             assistant_prompt_message = AssistantPromptMessage(
                 content=delta_content or "", tool_calls=assistant_message_tool_calls
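For illustration, here is a self-contained trace of how this chunk-wise wrapping behaves across a stream. It uses a standalone closure that mirrors the assumed behavior sketched earlier (plain newline replacement, as in the Ollama variant); none of these names come from the commit itself.

# Standalone demo of the assumed chunk-wise wrapping behavior.
def make_wrapper():
    # Per-stream flag, mirroring the removed is_reasoning_started_tag state.
    state = {"thinking": False}

    def wrap(text: str) -> str:
        if "<think>" in text:
            state["thinking"] = True
            return text.replace("<think>", "> 💭 ")
        if "</think>" in text:
            state["thinking"] = False
            return text.replace("</think>", "") + "\n\n"
        if state["thinking"]:
            return text.replace("\n", "\n> ")
        return text

    return wrap

wrap = make_wrapper()
for chunk in ["<think>Let me", " check.\n", "Done.</think>", "The answer is 4."]:
    print(repr(wrap(chunk)))
# '> 💭 Let me'
# ' check.\n> '
# 'Done.\n\n'
# 'The answer is 4.'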
