
1) change back to non-parallel function calling, 2) change llm backend for io_agent
IBMC265 committed Sep 28, 2024
1 parent 41fc4d2 commit ae183d7
Showing 2 changed files with 16 additions and 21 deletions.
21 changes: 7 additions & 14 deletions litemultiagent/agents/base.py
@@ -3,9 +3,6 @@
from openai import OpenAI
import json
from concurrent.futures import ThreadPoolExecutor, as_completed
#from litemultiagent.config.agent_config import agent_to_model, model_cost
# from litemultiagent.agents.base import BaseAgent
# from litemultiagent.tools.registry import ToolRegistry, Tool
from supabase import create_client, Client
from litellm import completion
import os
@@ -45,7 +42,7 @@
"tool_choice" : "auto",
},
"io_agent": {
"model_name" : "gpt-4o-mini",
"model_name" : "claude-3-5-sonnet-20240620",
"tool_choice" : "auto",
},
"retrieval_agent": {
@@ -116,7 +113,7 @@ def _send_completion_request(self, depth: int = 0) -> str:
if self.save_to == "csv":
self._save_to_csv(response, depth)
message = response.choices[0].message
self.messages.append(message)
self.messages.append(message.model_dump())
return message.content

response = completion(
@@ -137,7 +134,7 @@ def _send_completion_request(self, depth: int = 0) -> str:

if tool_calls is None or len(tool_calls) == 0:
message = response.choices[0].message
self.messages.append(message)
self.messages.append(message.model_dump())
return message.content

tool_call_message = {
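
In both branches the assistant message is now appended as message.model_dump() instead of the pydantic object itself, so the conversation history stays a list of plain, JSON-serializable dicts that can be sent back through completion() regardless of provider. A minimal sketch of the pattern (the helper name is made up for illustration):

```python
from typing import Any, Dict, List

def append_assistant_turn(messages: List[Dict[str, Any]], response: Any) -> str:
    # response.choices[0].message is a pydantic model; model_dump() converts it
    # to a plain dict (role, content, tool_calls, ...) so the history can be
    # re-sent to litellm without provider-specific object types leaking in.
    message = response.choices[0].message
    messages.append(message.model_dump())
    return message.content
```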
@@ -156,14 +153,10 @@ def _process_tool_calls(self, tool_calls: List[Dict[str, Any]]) -> List[Dict[str
tool_call_responses = []
logger.info(f"Number of function calls: {len(tool_calls)}")

with ThreadPoolExecutor(max_workers=None) as executor:
future_to_tool_call = {executor.submit(self._process_single_tool_call, tool_call): tool_call for tool_call
in tool_calls}

for future in as_completed(future_to_tool_call):
result = future.result()
if result:
tool_call_responses.append(result)
for tool_call in tool_calls:
result = self._process_single_tool_call(tool_call)
if result:
tool_call_responses.append(result)

return tool_call_responses
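
This replaces the ThreadPoolExecutor fan-out with a plain loop, so tool calls now run one at a time in the order the model emitted them. A rough sketch of the sequential pattern with a stand-in dispatch table (the real lookup goes through the agent's tool registry, and _process_single_tool_call is the repo's own helper):

```python
import json
from typing import Any, Callable, Dict, List

# Stand-in tool table for illustration only; the repo resolves tools differently.
TOOLS: Dict[str, Callable[..., Any]] = {"add": lambda a, b: a + b}

def process_tool_calls(tool_calls: List[Any]) -> List[Dict[str, Any]]:
    responses: List[Dict[str, Any]] = []
    for tool_call in tool_calls:  # sequential: deterministic order, no shared-state races
        fn = TOOLS[tool_call.function.name]
        args = json.loads(tool_call.function.arguments)
        result = fn(**args)
        responses.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "name": tool_call.function.name,
            "content": json.dumps(result),
        })
    return responses
```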

16 changes: 9 additions & 7 deletions litemultiagent/main.py
@@ -152,17 +152,19 @@ def main():
main_agent = agent_manager.get_agent(main_agent_config)

# # # Example usage
# task = "generate a image of a ginger cat and save it as ginger_cat.png"
# result = main_agent.execute(task)
# print("IO Agent Result:", result)
#
# task = "write python script to calculate the sum from 1 to 10, and run the python script to get result"
# result = main_agent.execute(task)
# print("IO Agent Result:", result)
task = "generate a image of a ginger cat and save it as ginger_cat.png"
result = main_agent.execute(task)
print("IO Agent Result:", result)

task = "write python script to calculate the sum from 1 to 10, and run the python script to get result"
result = main_agent.execute(task)
print("IO Agent Result:", result)

task = "browse web to search and check the brands of dining table, and summarize the results in a table, save the table into a markdown file called summary.md"
result = main_agent.execute(task)
print("IO Agent Result:", result)



if __name__ == "__main__":
main()
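
Note that the three example tasks now run unconditionally, and with io_agent pointed at Claude while the other agents stay on gpt-4o-mini, running main() presumably needs both OPENAI_API_KEY and ANTHROPIC_API_KEY set in the environment (plus Supabase credentials if the Supabase logging path in base.py is used).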
