diff --git a/py/core/agent/base.py b/py/core/agent/base.py
index 6c82d724c..9adeafac4 100644
--- a/py/core/agent/base.py
+++ b/py/core/agent/base.py
@@ -162,7 +162,6 @@ async def process_llm_response(  # type: ignore
     content_buffer = ""
 
     async for chunk in stream:
-        print("chunk = ", chunk)
         delta = chunk.choices[0].delta
         if delta.tool_calls:
             for tool_call in delta.tool_calls:
diff --git a/py/core/main/services/retrieval_service.py b/py/core/main/services/retrieval_service.py
index a0d869101..9ce91eae5 100644
--- a/py/core/main/services/retrieval_service.py
+++ b/py/core/main/services/retrieval_service.py
@@ -390,7 +390,6 @@ async def agent(
         )
 
         if rag_generation_config.stream:
-            # collected_output = []
 
             async def stream_response():
                 try:
                     async with manage_run(
@@ -409,8 +408,6 @@ async def stream_response():
                         rag_generation_config=rag_generation_config,
                         include_title_if_available=include_title_if_available,
                     ):
-                        # print(chunk)
-                        # collected_output.append(chunk)
                         yield chunk
                 except Exception as e:
                     logger.error(f"Error streaming agent output: {e}")