diff --git a/letta/__init__.py b/letta/__init__.py
index 3936cbb72b..2aabb1ba2c 100644
--- a/letta/__init__.py
+++ b/letta/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.6.4"
+__version__ = "0.6.5"
 
 # import clients
 from letta.client.client import LocalClient, RESTClient, create_client
diff --git a/letta/agent.py b/letta/agent.py
index 532c4e13a7..485f2112b9 100644
--- a/letta/agent.py
+++ b/letta/agent.py
@@ -604,6 +604,9 @@ def _get_ai_reply(
             and len(self.tool_rules_solver.init_tool_rules) > 0
         ):
             force_tool_call = self.tool_rules_solver.init_tool_rules[0].tool_name
+        # Force a tool call if exactly one tool is specified
+        elif step_count is not None and step_count > 0 and len(allowed_tool_names) == 1:
+            force_tool_call = allowed_tool_names[0]
 
         for attempt in range(1, empty_response_retry_limit + 1):
             try:
diff --git a/letta/llm_api/anthropic.py b/letta/llm_api/anthropic.py
index 912ac4567f..4cca920a5c 100644
--- a/letta/llm_api/anthropic.py
+++ b/letta/llm_api/anthropic.py
@@ -262,10 +262,24 @@ def convert_anthropic_response_to_chatcompletion(
                     ),
                 )
             ]
-        else:
-            # Just inner mono
-            content = strip_xml_tags(string=response_json["content"][0]["text"], tag=inner_thoughts_xml_tag)
-            tool_calls = None
+        elif len(response_json["content"]) == 1:
+            if response_json["content"][0]["type"] == "tool_use":
+                # function call only
+                content = None
+                tool_calls = [
+                    ToolCall(
+                        id=response_json["content"][0]["id"],
+                        type="function",
+                        function=FunctionCall(
+                            name=response_json["content"][0]["name"],
+                            arguments=json.dumps(response_json["content"][0]["input"], indent=2),
+                        ),
+                    )
+                ]
+            else:
+                # inner mono only
+                content = strip_xml_tags(string=response_json["content"][0]["text"], tag=inner_thoughts_xml_tag)
+                tool_calls = None
     else:
         raise RuntimeError("Unexpected type for content in response_json.")
 
@@ -327,6 +341,14 @@ def anthropic_chat_completions_request(
     if anthropic_tools is not None:
         data["tools"] = anthropic_tools
 
+        # TODO: Add support for other tool_choice options like "auto", "any"
+        if len(anthropic_tools) == 1:
+            data["tool_choice"] = {
+                "type": "tool",  # Changed from "function" to "tool"
+                "name": anthropic_tools[0]["name"],  # Directly specify name without nested "function" object
+                "disable_parallel_tool_use": True  # Force single tool use
+            }
+
     # Move 'system' to the top level
     # 'messages: Unexpected role "system". The Messages API accepts a top-level `system` parameter, not "system" as an input message role.'
     assert data["messages"][0]["role"] == "system", f"Expected 'system' role in messages[0]:\n{data['messages'][0]}"
@@ -362,7 +384,6 @@ def anthropic_chat_completions_request(
     data.pop("top_p", None)
     data.pop("presence_penalty", None)
     data.pop("user", None)
-    data.pop("tool_choice", None)
 
     response_json = make_post_request(url, headers, data)
     return convert_anthropic_response_to_chatcompletion(response_json=response_json, inner_thoughts_xml_tag=inner_thoughts_xml_tag)
diff --git a/poetry.lock b/poetry.lock
index 9130cc158c..80453badc6 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -726,13 +726,13 @@ test = ["pytest"]
 
 [[package]]
 name = "composio-core"
-version = "0.5.51"
+version = "0.6.3"
 description = "Core package to act as a bridge between composio platform and other services."
 optional = false
 python-versions = "<4,>=3.9"
 files = [
-    {file = "composio_core-0.5.51-py3-none-any.whl", hash = "sha256:9bde524029967d8ff86a97986ce68cc4797c9b7ad6ea00450844af1238033a83"},
-    {file = "composio_core-0.5.51.tar.gz", hash = "sha256:052f8af227d1a8121a2475ddce96ba09d5d556ad34cc091672dfe14b297c6742"},
+    {file = "composio_core-0.6.3-py3-none-any.whl", hash = "sha256:981a9856781b791242f947a9685a18974d8a012ac7fab2c09438e1b19610d6a2"},
+    {file = "composio_core-0.6.3.tar.gz", hash = "sha256:13098b20d8832e74453ca194889305c935432156fc07be91dfddf76561ad591b"},
 ]
 
 [package.dependencies]
@@ -744,7 +744,7 @@ inflection = ">=0.5.1"
 jsonref = ">=1.1.0"
 jsonschema = ">=4.21.1,<5"
 paramiko = ">=3.4.1"
-pydantic = ">=2.6.4,<2.10"
+pydantic = ">=2.6.4"
 pyperclip = ">=1.8.2,<2"
 pysher = "1.0.8"
 requests = ">=2.31.0,<3"
@@ -754,7 +754,7 @@ sentry-sdk = ">=2.0.0"
 uvicorn = "*"
 
 [package.extras]
-all = ["aiohttp", "click", "diskcache", "docker (>=7.1.0)", "e2b (>=0.17.2a37,<1)", "e2b-code-interpreter", "fastapi", "flake8", "gql", "importlib-metadata (>=4.8.1)", "inflection (>=0.5.1)", "jsonref (>=1.1.0)", "jsonschema (>=4.21.1,<5)", "networkx", "paramiko (>=3.4.1)", "pathspec", "pydantic (>=2.6.4,<2.10)", "pygments", "pyperclip (>=1.8.2,<2)", "pysher (==1.0.8)", "requests (>=2.31.0,<3)", "requests_toolbelt", "rich (>=13.7.1,<14)", "ruff", "semver (>=2.13.0)", "sentry-sdk (>=2.0.0)", "transformers", "tree_sitter (==0.21.3)", "tree_sitter_languages", "uvicorn"]
+all = ["aiohttp", "click", "diskcache", "docker (>=7.1.0)", "e2b (>=0.17.2a37,<1)", "e2b-code-interpreter", "fastapi", "flake8", "gql", "importlib-metadata (>=4.8.1)", "inflection (>=0.5.1)", "jsonref (>=1.1.0)", "jsonschema (>=4.21.1,<5)", "networkx", "paramiko (>=3.4.1)", "pathspec", "pydantic (>=2.6.4)", "pygments", "pyperclip (>=1.8.2,<2)", "pysher (==1.0.8)", "requests (>=2.31.0,<3)", "requests_toolbelt", "rich (>=13.7.1,<14)", "ruff", "semver (>=2.13.0)", "sentry-sdk (>=2.0.0)", "transformers", "tree_sitter (==0.21.3)", "tree_sitter_languages", "uvicorn"]
 docker = ["docker (>=7.1.0)"]
 e2b = ["e2b (>=0.17.2a37,<1)", "e2b-code-interpreter"]
 flyio = ["gql", "requests_toolbelt"]
@@ -762,17 +762,17 @@ tools = ["diskcache", "flake8", "networkx", "pathspec", "pygments", "ruff", "tra
 
 [[package]]
 name = "composio-langchain"
-version = "0.5.51"
+version = "0.6.3"
 description = "Use Composio to get an array of tools with your LangChain agent."
 optional = false
 python-versions = "<4,>=3.9"
 files = [
-    {file = "composio_langchain-0.5.51-py3-none-any.whl", hash = "sha256:dc0e91b0b890ba5306bc096fcca376c3b0191d649c485300f477c0e5578deaa5"},
-    {file = "composio_langchain-0.5.51.tar.gz", hash = "sha256:332bb3482ab293f45acf07ad0cf988e7a101f88caaaec818395663e4a6269432"},
+    {file = "composio_langchain-0.6.3-py3-none-any.whl", hash = "sha256:0e749a1603dc0562293412d0a6429f88b75152b01a313cca859732070d762a6b"},
+    {file = "composio_langchain-0.6.3.tar.gz", hash = "sha256:2036f94bfe60974b31f2be0bfdb33dd75a1d43435f275141219b3376587bf49d"},
 ]
 
 [package.dependencies]
-composio_core = ">=0.5.50,<=0.5.51"
+composio_core = ">=0.5.0,<0.7.0"
 langchain = ">=0.1.0"
 langchain-openai = ">=0.0.2.post1"
 langchainhub = ">=0.1.15"
@@ -6246,4 +6246,4 @@ tests = ["wikipedia"]
 [metadata]
 lock-version = "2.0"
 python-versions = "<4.0,>=3.10"
-content-hash = "9c623c4d8c98b3fe724518428bb48ae85f8152453f200f767e13f48c59e0fe13"
\ No newline at end of file
+content-hash = "4a7cf176579d5dc15648979542da152ec98290f1e9f39039cfe9baf73bc1076f"
diff --git a/pyproject.toml b/pyproject.toml
index dc1da5940b..1278e9425e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "letta"
-version = "0.6.4"
+version = "0.6.5"
 packages = [
     {include = "letta"}
 ]
@@ -59,8 +59,8 @@ nltk = "^3.8.1"
 jinja2 = "^3.1.4"
 locust = {version = "^2.31.5", optional = true}
 wikipedia = {version = "^1.4.0", optional = true}
-composio-langchain = "^0.5.28"
-composio-core = "^0.5.51"
+composio-langchain = "^0.6.3"
+composio-core = "^0.6.3"
 alembic = "^1.13.3"
 pyhumps = "^3.8.0"
 psycopg2 = {version = "^2.9.10", optional = true}
diff --git a/tests/helpers/endpoints_helper.py b/tests/helpers/endpoints_helper.py
index ddaa1d960d..8f1aa99c74 100644
--- a/tests/helpers/endpoints_helper.py
+++ b/tests/helpers/endpoints_helper.py
@@ -64,6 +64,7 @@ def setup_agent(
     tool_ids: Optional[List[str]] = None,
     tool_rules: Optional[List[BaseToolRule]] = None,
     agent_uuid: str = agent_uuid,
+    include_base_tools: bool = True,
 ) -> AgentState:
     config_data = json.load(open(filename, "r"))
     llm_config = LLMConfig(**config_data)
@@ -77,7 +78,7 @@
     memory = ChatMemory(human=memory_human_str, persona=memory_persona_str)
 
     agent_state = client.create_agent(
-        name=agent_uuid, llm_config=llm_config, embedding_config=embedding_config, memory=memory, tool_ids=tool_ids, tool_rules=tool_rules
+        name=agent_uuid, llm_config=llm_config, embedding_config=embedding_config, memory=memory, tool_ids=tool_ids, tool_rules=tool_rules, include_base_tools=include_base_tools,
     )
     return agent_state
 
diff --git a/tests/integration_test_agent_tool_graph.py b/tests/integration_test_agent_tool_graph.py
index 19c7dbd6cb..336777215d 100644
--- a/tests/integration_test_agent_tool_graph.py
+++ b/tests/integration_test_agent_tool_graph.py
@@ -234,3 +234,51 @@ def test_claude_initial_tool_rule_enforced(mock_e2b_api_key_none):
             if i < 2:
                 backoff_time = 10 * (2 ** i)
                 time.sleep(backoff_time)
+
+@pytest.mark.timeout(60)  # Sets a 60-second timeout for the test since this could loop infinitely
+def test_agent_no_structured_output_with_one_child_tool(mock_e2b_api_key_none):
+    client = create_client()
+    cleanup(client=client, agent_uuid=agent_uuid)
+
+    send_message = client.server.tool_manager.get_tool_by_name(tool_name="send_message", actor=client.user)
+    archival_memory_search = client.server.tool_manager.get_tool_by_name(tool_name="archival_memory_search", actor=client.user)
+    archival_memory_insert = client.server.tool_manager.get_tool_by_name(tool_name="archival_memory_insert", actor=client.user)
+
+    # Make tool rules
+    tool_rules = [
+        InitToolRule(tool_name="archival_memory_search"),
+        ChildToolRule(tool_name="archival_memory_search", children=["archival_memory_insert"]),
+        ChildToolRule(tool_name="archival_memory_insert", children=["send_message"]),
+        TerminalToolRule(tool_name="send_message"),
+    ]
+    tools = [send_message, archival_memory_search, archival_memory_insert]
+
+    config_files = [
+        "tests/configs/llm_model_configs/claude-3-sonnet-20240229.json",
+        "tests/configs/llm_model_configs/openai-gpt-4o.json",
+    ]
+
+    for config in config_files:
+        agent_state = setup_agent(client, config, agent_uuid=agent_uuid, tool_ids=[t.id for t in tools], tool_rules=tool_rules)
+        response = client.user_message(agent_id=agent_state.id, message="hi. run archival memory search")
+
+        # Make checks
+        assert_sanity_checks(response)
+
+        # Assert the tools were called
+        assert_invoked_function_call(response.messages, "archival_memory_search")
+        assert_invoked_function_call(response.messages, "archival_memory_insert")
+        assert_invoked_function_call(response.messages, "send_message")
+
+        # Check ordering of tool calls
+        tool_names = [t.name for t in [archival_memory_search, archival_memory_insert, send_message]]
+        for m in response.messages:
+            if isinstance(m, FunctionCallMessage):
+                # Check that it's equal to the first one
+                assert m.function_call.name == tool_names[0]
+
+                # Pop out first one
+                tool_names = tool_names[1:]
+
+        print(f"Got successful response from client: \n\n{response}")
+        cleanup(client=client, agent_uuid=agent_uuid)
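Note on the forced tool call: with exactly one tool attached, anthropic_chat_completions_request now sends an explicit tool_choice to the Anthropic Messages API. Below is a minimal, illustrative sketch of the resulting request-body shape; the model name, tool schema, system prompt, and message text are hypothetical examples, not code from this patch.

# Illustrative sketch only (not part of the patch): the payload shape produced when
# a single tool is registered, mirroring the tool_choice block added in
# letta/llm_api/anthropic.py. Tool schema, system prompt, and message text are made up.
request_body = {
    "model": "claude-3-sonnet-20240229",
    "max_tokens": 1024,
    "system": "You are a helpful agent.",
    "messages": [{"role": "user", "content": "hi. run archival memory search"}],
    "tools": [
        {
            "name": "archival_memory_search",
            "description": "Search archival memory.",
            "input_schema": {"type": "object", "properties": {"query": {"type": "string"}}},
        }
    ],
    # With exactly one tool, the model is forced to call it and parallel tool use is disabled:
    "tool_choice": {
        "type": "tool",
        "name": "archival_memory_search",
        "disable_parallel_tool_use": True,
    },
}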