From 51b33cfe089f954f2c85ebc224c82eb8d85b3d70 Mon Sep 17 00:00:00 2001 From: Novice Date: Mon, 10 Feb 2025 08:45:20 +0800 Subject: [PATCH] bump agent strategy version to 0.0.6 (#188) Co-authored-by: Novice Lee --- agent-strategies/cot_agent/manifest.yaml | 2 +- agent-strategies/cot_agent/requirements.txt | 2 +- .../cot_agent/strategies/ReAct.py | 73 ++++++++++-------- .../cot_agent/strategies/function_calling.py | 76 +++++++++++-------- 4 files changed, 87 insertions(+), 66 deletions(-) diff --git a/agent-strategies/cot_agent/manifest.yaml b/agent-strategies/cot_agent/manifest.yaml index b68b340..ec40289 100644 --- a/agent-strategies/cot_agent/manifest.yaml +++ b/agent-strategies/cot_agent/manifest.yaml @@ -1,4 +1,4 @@ -version: 0.0.5 +version: 0.0.6 type: plugin author: "langgenius" name: "agent" diff --git a/agent-strategies/cot_agent/requirements.txt b/agent-strategies/cot_agent/requirements.txt index ce20ec7..83f69b8 100644 --- a/agent-strategies/cot_agent/requirements.txt +++ b/agent-strategies/cot_agent/requirements.txt @@ -1 +1 @@ -dify_plugin~=0.0.1b62 +dify_plugin~=0.0.1b63 diff --git a/agent-strategies/cot_agent/strategies/ReAct.py b/agent-strategies/cot_agent/strategies/ReAct.py index 0bfc4f9..aa3fef3 100644 --- a/agent-strategies/cot_agent/strategies/ReAct.py +++ b/agent-strategies/cot_agent/strategies/ReAct.py @@ -13,7 +13,7 @@ ToolPromptMessage, UserPromptMessage, ) -from dify_plugin.entities.tool import ToolInvokeMessage, ToolProviderType +from dify_plugin.entities.tool import LogMetadata, ToolInvokeMessage, ToolProviderType from dify_plugin.interfaces.agent import ( AgentModelConfig, AgentScratchpadUnit, @@ -130,7 +130,7 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: label=f"ROUND {iteration_step}", data={}, metadata={ - "started_at": round_started_at, + LogMetadata.STARTED_AT: round_started_at, }, status=ToolInvokeMessage.LogMessage.LogStatus.START, ) @@ -171,7 +171,10 @@ def _invoke(self, parameters: dict[str, 
Any]) -> Generator[AgentInvokeMessage]: model_log = self.create_log_message( label=f"{model.model} Thought", data={}, - metadata={"start_at": model_started_at, "provider": model.provider}, + metadata={ + LogMetadata.STARTED_AT: model_started_at, + LogMetadata.PROVIDER: model.provider, + }, parent=round_log, status=ToolInvokeMessage.LogMessage.LogStatus.START, ) @@ -183,6 +186,7 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: # detect action assert scratchpad.agent_response is not None scratchpad.agent_response += json.dumps(chunk.model_dump()) + scratchpad.action_str = json.dumps(chunk.model_dump()) scratchpad.action = action else: @@ -190,13 +194,11 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: scratchpad.thought = scratchpad.thought or "" scratchpad.agent_response += chunk scratchpad.thought += chunk - scratchpad.thought = ( scratchpad.thought.strip() if scratchpad.thought else "I am thinking about how to help you" ) - agent_scratchpad.append(scratchpad) # get llm usage @@ -206,25 +208,27 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: else: usage_dict["usage"] = LLMUsage.empty_usage() - if not scratchpad.is_final(): - pass + action = ( + scratchpad.action.to_dict() + if scratchpad.action + else {"action": scratchpad.agent_response} + ) + yield self.finish_log_message( log=model_log, - data={ - "output": scratchpad.agent_response, - }, + data={"thought": scratchpad.thought, **action}, metadata={ - "started_at": model_started_at, - "finished_at": time.perf_counter(), - "elapsed_time": time.perf_counter() - model_started_at, - "provider": model.provider, - "total_price": usage_dict["usage"].total_price + LogMetadata.STARTED_AT: model_started_at, + LogMetadata.FINISHED_AT: time.perf_counter(), + LogMetadata.ELAPSED_TIME: time.perf_counter() - model_started_at, + LogMetadata.PROVIDER: model.provider, + LogMetadata.TOTAL_PRICE: usage_dict["usage"].total_price if 
usage_dict["usage"] else 0, - "currency": usage_dict["usage"].currency + LogMetadata.CURRENCY: usage_dict["usage"].currency if usage_dict["usage"] else "", - "total_tokens": usage_dict["usage"].total_tokens + LogMetadata.TOTAL_TOKENS: usage_dict["usage"].total_tokens if usage_dict["usage"] else 0, }, @@ -252,8 +256,10 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: label=f"CALL {tool_name}", data={}, metadata={ - "started_at": time.perf_counter(), - "provider": tool_instances[tool_name].identity.provider + LogMetadata.STARTED_AT: time.perf_counter(), + LogMetadata.PROVIDER: tool_instances[ + tool_name + ].identity.provider if tool_instances.get(tool_name) else "", }, @@ -275,12 +281,15 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: "meta": tool_invoke_meta.to_dict(), }, metadata={ - "started_at": tool_call_started_at, - "provider": tool_instances[tool_name].identity.provider + LogMetadata.STARTED_AT: tool_call_started_at, + LogMetadata.PROVIDER: tool_instances[ + tool_name + ].identity.provider if tool_instances.get(tool_name) else "", - "finished_at": time.perf_counter(), - "elapsed_time": time.perf_counter() - tool_call_started_at, + LogMetadata.FINISHED_AT: time.perf_counter(), + LogMetadata.ELAPSED_TIME: time.perf_counter() + - tool_call_started_at, }, ) @@ -297,16 +306,16 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: }, }, metadata={ - "started_at": round_started_at, - "finished_at": time.perf_counter(), - "elapsed_time": time.perf_counter() - round_started_at, - "total_price": usage_dict["usage"].total_price + LogMetadata.STARTED_AT: round_started_at, + LogMetadata.FINISHED_AT: time.perf_counter(), + LogMetadata.ELAPSED_TIME: time.perf_counter() - round_started_at, + LogMetadata.TOTAL_PRICE: usage_dict["usage"].total_price if usage_dict["usage"] else 0, - "currency": usage_dict["usage"].currency + LogMetadata.CURRENCY: usage_dict["usage"].currency if 
usage_dict["usage"] else "", - "total_tokens": usage_dict["usage"].total_tokens + LogMetadata.TOTAL_TOKENS: usage_dict["usage"].total_tokens if usage_dict["usage"] else 0, }, @@ -317,13 +326,13 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: yield self.create_json_message( { "execution_metadata": { - "total_price": llm_usage["usage"].total_price + LogMetadata.TOTAL_PRICE: llm_usage["usage"].total_price if llm_usage["usage"] is not None else 0, - "currency": llm_usage["usage"].currency + LogMetadata.CURRENCY: llm_usage["usage"].currency if llm_usage["usage"] is not None else "", - "total_tokens": llm_usage["usage"].total_tokens + LogMetadata.TOTAL_TOKENS: llm_usage["usage"].total_tokens if llm_usage["usage"] is not None else 0, } diff --git a/agent-strategies/cot_agent/strategies/function_calling.py b/agent-strategies/cot_agent/strategies/function_calling.py index 63bd3a5..1cd95e1 100644 --- a/agent-strategies/cot_agent/strategies/function_calling.py +++ b/agent-strategies/cot_agent/strategies/function_calling.py @@ -15,11 +15,12 @@ AssistantPromptMessage, PromptMessage, PromptMessageContentType, + PromptMessageRole, SystemPromptMessage, ToolPromptMessage, UserPromptMessage, ) -from dify_plugin.entities.tool import ToolInvokeMessage, ToolProviderType +from dify_plugin.entities.tool import LogMetadata, ToolInvokeMessage, ToolProviderType from dify_plugin.interfaces.agent import AgentModelConfig, AgentStrategy, ToolEntity from pydantic import BaseModel, Field @@ -76,7 +77,9 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: query = fc_params.query self.query = query instruction = fc_params.instruction - init_prompt_messages = [SystemPromptMessage(content=instruction)] + init_prompt_messages = [ + PromptMessage(role=PromptMessageRole.SYSTEM, content=instruction) + ] tools = fc_params.tools tool_instances = {tool.identity.name: tool for tool in tools} if tools else {} model = fc_params.model @@ -103,7 
+106,7 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: label=f"ROUND {iteration_step}", data={}, metadata={ - "started_at": round_started_at, + LogMetadata.STARTED_AT: round_started_at, }, status=ToolInvokeMessage.LogMessage.LogStatus.START, ) @@ -126,7 +129,10 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: model_log = self.create_log_message( label=f"{model.model} Thought", data={}, - metadata={"start_at": model_started_at, "provider": model.provider}, + metadata={ + LogMetadata.STARTED_AT: model_started_at, + LogMetadata.PROVIDER: model.provider, + }, parent=round_log, status=ToolInvokeMessage.LogMessage.LogStatus.START, ) @@ -153,10 +159,7 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: current_llm_usage = None if isinstance(chunks, Generator): - is_first_chunk = True for chunk in chunks: - if is_first_chunk: - is_first_chunk = False # check if there is any tool call if self.check_tool_calls(chunk): function_call_state = True @@ -239,18 +242,22 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: data={ "output": response, "tool_name": tool_call_names, - "tool_input": tool_call_inputs, + "tool_input": { + tool_call[1]: tool_call[2] for tool_call in tool_calls + }, }, metadata={ - "started_at": model_started_at, - "finished_at": time.perf_counter(), - "elapsed_time": time.perf_counter() - model_started_at, - "provider": model.provider, - "total_price": current_llm_usage.total_price + LogMetadata.STARTED_AT: model_started_at, + LogMetadata.FINISHED_AT: time.perf_counter(), + LogMetadata.ELAPSED_TIME: time.perf_counter() - model_started_at, + LogMetadata.PROVIDER: model.provider, + LogMetadata.TOTAL_PRICE: current_llm_usage.total_price if current_llm_usage else 0, - "currency": current_llm_usage.currency if current_llm_usage else "", - "total_tokens": current_llm_usage.total_tokens + LogMetadata.CURRENCY: current_llm_usage.currency 
+ if current_llm_usage + else "", + LogMetadata.TOTAL_TOKENS: current_llm_usage.total_tokens + if current_llm_usage + else 0, }, @@ -284,8 +291,8 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: label=f"CALL {tool_call_name}", data={}, metadata={ - "started_at": time.perf_counter(), - "provider": tool_instance.identity.provider, + LogMetadata.STARTED_AT: time.perf_counter(), + LogMetadata.PROVIDER: tool_instance.identity.provider, }, parent=round_log, status=ToolInvokeMessage.LogMessage.LogStatus.START, @@ -340,12 +347,14 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: result += f"tool response: {text}." else: result += f"tool response: {response.message!r}." - tool_invoke_meta = ToolInvokeMeta.error_instance("") tool_response = { "tool_call_id": tool_call_id, "tool_call_name": tool_call_name, + "tool_call_input": { + **tool_instance.runtime_parameters, + **tool_call_args, + }, "tool_response": result, - "meta": tool_invoke_meta.to_dict(), } yield self.finish_log_message( @@ -354,10 +363,11 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: "output": tool_response, }, metadata={ - "started_at": tool_call_started_at, - "provider": tool_instance.identity.provider, - "finished_at": time.perf_counter(), - "elapsed_time": time.perf_counter() - tool_call_started_at, + LogMetadata.STARTED_AT: tool_call_started_at, + LogMetadata.PROVIDER: tool_instance.identity.provider, + LogMetadata.FINISHED_AT: time.perf_counter(), + LogMetadata.ELAPSED_TIME: time.perf_counter() + - tool_call_started_at, }, ) tool_responses.append(tool_response) @@ -384,14 +394,16 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: }, }, metadata={ - "started_at": round_started_at, - "finished_at": time.perf_counter(), - "elapsed_time": time.perf_counter() - round_started_at, - "total_price": current_llm_usage.total_price + LogMetadata.STARTED_AT: round_started_at, + 
LogMetadata.FINISHED_AT: time.perf_counter(), + LogMetadata.ELAPSED_TIME: time.perf_counter() - round_started_at, + LogMetadata.TOTAL_PRICE: current_llm_usage.total_price if current_llm_usage else 0, - "currency": current_llm_usage.currency if current_llm_usage else "", - "total_tokens": current_llm_usage.total_tokens + LogMetadata.CURRENCY: current_llm_usage.currency + if current_llm_usage + else "", + LogMetadata.TOTAL_TOKENS: current_llm_usage.total_tokens if current_llm_usage else 0, }, @@ -401,13 +413,13 @@ def _invoke(self, parameters: dict[str, Any]) -> Generator[AgentInvokeMessage]: yield self.create_json_message( { "execution_metadata": { - "total_price": llm_usage["usage"].total_price + LogMetadata.TOTAL_PRICE: llm_usage["usage"].total_price if llm_usage["usage"] is not None else 0, - "currency": llm_usage["usage"].currency + LogMetadata.CURRENCY: llm_usage["usage"].currency if llm_usage["usage"] is not None else "", - "total_tokens": llm_usage["usage"].total_tokens + LogMetadata.TOTAL_TOKENS: llm_usage["usage"].total_tokens if llm_usage["usage"] is not None else 0, }