From 0d13aee15cdcf748324056f58ca9cea9888dd3b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=86=E8=90=8C=E9=97=B7=E6=B2=B9=E7=93=B6?= <253605712@qq.com>
Date: Thu, 6 Feb 2025 15:32:10 +0800
Subject: [PATCH] feat: add deepseek r1 think display for ollama provider (#13272)

---
 api/core/model_runtime/model_providers/ollama/llm/llm.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/api/core/model_runtime/model_providers/ollama/llm/llm.py b/api/core/model_runtime/model_providers/ollama/llm/llm.py
index 3ae728d4b36985..0377731175ebbe 100644
--- a/api/core/model_runtime/model_providers/ollama/llm/llm.py
+++ b/api/core/model_runtime/model_providers/ollama/llm/llm.py
@@ -314,6 +314,7 @@ def _handle_generate_stream_response(
         """
         full_text = ""
         chunk_index = 0
+        is_reasoning_started = False
 
         def create_final_llm_result_chunk(
             index: int, message: AssistantPromptMessage, finish_reason: str
@@ -367,6 +368,14 @@ def create_final_llm_result_chunk(
                 # transform assistant message to prompt message
                 text = chunk_json["response"]
 
+                if "<think>" in text:
+                    is_reasoning_started = True
+                    text = text.replace("<think>", "> 💭 ")
+                elif "</think>" in text:
+                    is_reasoning_started = False
+                    text = text.replace("</think>", "") + "\n\n"
+                elif is_reasoning_started:
+                    text = text.replace("\n", "\n> ")
+
                 assistant_prompt_message = AssistantPromptMessage(content=text)
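
For reviewers, below is a minimal standalone sketch of the transformation this patch applies to a simulated stream of Ollama response chunks. The function name render_chunks and the sample chunk list are hypothetical, introduced only for illustration; they are not part of the patch or of the Dify codebase.

# Sketch (assumed names) of the think-tag rendering added in the diff above:
# "<think>" opens a markdown blockquote prefixed with 💭, "</think>" closes it,
# and newlines emitted while inside the reasoning span are continued with "> "
# so the whole reasoning section renders as one quoted block.

def render_chunks(chunks):
    is_reasoning_started = False
    rendered = []
    for text in chunks:
        if "<think>" in text:
            is_reasoning_started = True
            text = text.replace("<think>", "> 💭 ")
        elif "</think>" in text:
            is_reasoning_started = False
            text = text.replace("</think>", "") + "\n\n"
        elif is_reasoning_started:
            text = text.replace("\n", "\n> ")
        rendered.append(text)
    return "".join(rendered)


if __name__ == "__main__":
    # Simulated DeepSeek R1 stream: reasoning inside <think>...</think>,
    # followed by the final answer.
    sample = [
        "<think>",
        "First, check the premise.\n",
        "Then conclude.",
        "</think>",
        "The answer is 42.",
    ]
    print(render_chunks(sample))
    # Prints:
    # > 💭 First, check the premise.
    # > Then conclude.
    #
    # The answer is 42.

Note that the substring checks assume each <think> or </think> tag arrives whole within a single streamed chunk; a tag split across chunk boundaries would pass through the checks unmodified.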