+```
+
+**For convenience, Lagent provides `InternLMActionProcessor`, which is adapted to messages formatted by `ToolParser` as mentioned above.**
+
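+For illustration, a minimal sketch of plugging it into an executor, mirroring the multi-agent workflow below:
+
+```python
+from lagent.actions import ActionExecutor, IPythonInterpreter
+from lagent.hooks import InternLMActionProcessor
+
+# mirrors the hook usage in the multi-agent workflow below
+executor = ActionExecutor(actions=[IPythonInterpreter()], hooks=[InternLMActionProcessor()])
+```
+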
+### Dual Interfaces
+
+Lagent adopts a dual-interface design: almost every component (LLMs, actions, action executors, etc.) has a corresponding asynchronous variant, identified by the prefix 'Async'. It is recommended to use synchronous agents for debugging and asynchronous ones for large-scale inference to make the most of idle CPU and GPU resources.
+
+However, ensure the internal consistency of agents: asynchronous agents should be equipped with asynchronous LLMs and asynchronous action executors that drive asynchronous tools.
+
+```python
+from lagent.llms import VllmModel, AsyncVllmModel, LMDeployPipeline, AsyncLMDeployPipeline
+from lagent.actions import ActionExecutor, AsyncActionExecutor, WebBrowser, AsyncWebBrowser
+from lagent.agents import Agent, AsyncAgent, AgentForInternLM, AsyncAgentForInternLM
```
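+
+Below is a minimal asynchronous counterpart of the very first example; this is a sketch that assumes `AsyncVllmModel` accepts the same constructor arguments as `VllmModel`:
+
+```python
+import asyncio
+
+from lagent.agents import AsyncAgent
+from lagent.llms import AsyncVllmModel, INTERNLM2_META
+from lagent.schema import AgentMessage
+
+# assumption: AsyncVllmModel shares VllmModel's constructor arguments
+llm = AsyncVllmModel(
+    path='Qwen/Qwen2-7B-Instruct',
+    meta_template=INTERNLM2_META,
+    tp=1,
+    top_k=1,
+    temperature=1.0,
+    stop_words=['<|im_end|>'],
+    max_new_tokens=1024,
+)
+agent = AsyncAgent(llm, '你的回答只能从“典”、“孝”、“急”三个字中选一个。')
+user_msg = AgentMessage(sender='user', content='今天天气情况')
+bot_msg = asyncio.get_event_loop().run_until_complete(agent(user_msg))
+print(bot_msg)
+```
+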
-## What's Lagent?
+______________________________________________________________________
+
+## Practice
+
+- **Implement `forward` rather than `__call__` in subclasses unless necessary.**
+- **Always pass the `session_id` argument explicitly; it isolates memory, LLM requests, and tool invocations (e.g. maintaining multiple independent IPython environments) under concurrency.**
+
+### Single Agent
+
+A math agent that solves problems by programming:
+
+````python
+from lagent.agents.aggregator import InternLMToolAggregator
+
+class Coder(Agent):
+ def __init__(self, model_path, system_prompt, max_turn=3):
+ super().__init__()
+ llm = VllmModel(
+ path=model_path,
+ meta_template=INTERNLM2_META,
+ tp=1,
+ top_k=1,
+ temperature=1.0,
+ stop_words=['\n```\n', '<|im_end|>'],
+ max_new_tokens=1024,
+ )
+ self.agent = Agent(
+ llm,
+ system_prompt,
+ output_format=ToolParser(
+ tool_type='code interpreter', begin='```python\n', end='\n```\n'
+ ),
+ # `InternLMToolAggregator` is adapted to `ToolParser` for aggregating
+ # messages with tool invocations and execution results
+ aggregator=InternLMToolAggregator(),
+ )
+ self.executor = ActionExecutor([IPythonInteractive()], hooks=[CodeProcessor()])
+ self.max_turn = max_turn
+
+ def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
+ for _ in range(self.max_turn):
+ message = self.agent(message, session_id=session_id)
+ if message.formatted['tool_type'] is None:
+ return message
+ message = self.executor(message, session_id=session_id)
+ return message
+
+coder = Coder('Qwen/Qwen2-7B-Instruct', 'Solve the problem step by step with the assistance of Python code')
+query = AgentMessage(
+ sender='user',
+ content='Find the projection of $\\mathbf{a}$ onto $\\mathbf{b} = '
+ '\\begin{pmatrix} 1 \\\\ -3 \\end{pmatrix}$ if $\\mathbf{a} \\cdot \\mathbf{b} = 2.$'
+)
+answer = coder(query)
+print(answer.content)
+print('-' * 120)
+for msg in coder.state_dict()['agent.memory']:
+ print('*' * 80)
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
+````
-Lagent is a lightweight open-source framework that allows users to efficiently build large language model(LLM)-based agents. It also provides some typical tools to augment LLM. The overview of our framework is shown below:
+### Multiple Agents
-
+Asynchronous blogging agents that improve writing quality by self-refinement ([original AutoGen example](https://microsoft.github.io/autogen/0.2/docs/topics/prompting-and-reasoning/reflection/))
-## Major Features
+```python
+import asyncio
+import os
+from lagent.llms import AsyncGPTAPI
+from lagent.agents import AsyncAgent
+os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY'
+
+class PrefixedMessageHook(Hook):
+ def __init__(self, prefix: str, senders: list = None):
+ self.prefix = prefix
+ self.senders = senders or []
+
+ def before_agent(self, agent, messages, session_id):
+ for i, message in enumerate(messages):
+ if message.sender in self.senders:
+ message = message.copy(deep=True)
+ message.content = self.prefix + message.content
+ messages[i] = message
+ return messages
+
+class AsyncBlogger(AsyncAgent):
+ def __init__(self, model_path, writer_prompt, critic_prompt, critic_prefix='', max_turn=3):
+ super().__init__()
+ llm = AsyncGPTAPI(model_type=model_path, retry=5, max_new_tokens=2048)
+ self.writer = AsyncAgent(llm, writer_prompt, name='writer')
+ self.critic = AsyncAgent(
+ llm, critic_prompt, name='critic', hooks=[PrefixedMessageHook(critic_prefix, ['writer'])]
+ )
+ self.max_turn = max_turn
+
+ async def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
+ for _ in range(self.max_turn):
+ message = await self.writer(message, session_id=session_id)
+ message = await self.critic(message, session_id=session_id)
+ return await self.writer(message, session_id=session_id)
+
+blogger = AsyncBlogger(
+ 'gpt-4o-2024-05-13',
+ writer_prompt="You are an writing assistant tasked to write engaging blogpost. You try to generate the best blogpost possible for the user's request. "
+ "If the user provides critique, then respond with a revised version of your previous attempts",
+ critic_prompt="Generate critique and recommendations on the writing. Provide detailed recommendations, including requests for length, depth, style, etc..",
+ critic_prefix='Reflect and provide critique on the following writing. \n\n',
+)
+user_prompt = (
+ "Write an engaging blogpost on the recent updates in {topic}. "
+ "The blogpost should be engaging and understandable for general audience. "
+ "Should have more than 3 paragraphes but no longer than 1000 words.")
+bot_msgs = asyncio.get_event_loop().run_until_complete(
+ asyncio.gather(
+ *[
+ blogger(AgentMessage(sender='user', content=user_prompt.format(topic=topic)), session_id=i)
+ for i, topic in enumerate(['AI', 'Biotechnology', 'New Energy', 'Video Games', 'Pop Music'])
+ ]
+ )
+)
+print(bot_msgs[0].content)
+print('-' * 120)
+for msg in blogger.state_dict(session_id=0)['writer.memory']:
+ print('*' * 80)
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
+print('-' * 120)
+for msg in blogger.state_dict(session_id=0)['critic.memory']:
+ print('*' * 80)
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
+```
+
+A multi-agent workflow that performs information retrieval, data collection and chart plotting ([original LangGraph example](https://vijaykumarkartha.medium.com/multiple-ai-agents-creating-multi-agent-workflows-using-langgraph-and-langchain-0587406ec4e6))
+
+
+

+
-- Stream Output: Provides the `stream_chat` interface for streaming output, allowing cool streaming demos right at your local setup.
-- Interfacing is unified, with a comprehensive design upgrade for enhanced extensibility, including:
- - Model: Whether it's the OpenAI API, Transformers, or LMDeploy inference acceleration framework, you can seamlessly switch between models.
- - Action: Simple inheritance and decoration allow you to create your own personal toolkit, adaptable to both InternLM and GPT.
- - Agent: Consistent with the Model's input interface, the transformation from model to intelligent agent only takes one step, facilitating the exploration and implementation of various agents.
-- Documentation has been thoroughly upgraded with full API documentation coverage.
+````python
+import json
+from lagent.actions import IPythonInterpreter, WebBrowser, ActionExecutor
+from lagent.agents.stream import get_plugin_prompt
+from lagent.llms import GPTAPI
+from lagent.hooks import InternLMActionProcessor
-## 💻Tech Stack
+TOOL_TEMPLATE = (
+ "You are a helpful AI assistant, collaborating with other assistants. Use the provided tools to progress"
+ " towards answering the question. If you are unable to fully answer, that's OK, another assistant with"
+ " different tools will help where you left off. Execute what you can to make progress. If you or any of"
+ " the other assistants have the final answer or deliverable, prefix your response with {finish_pattern}"
+ " so the team knows to stop. You have access to the following tools:\n{tool_description}\nPlease provide"
+ " your thought process when you need to use a tool, followed by the call statement in this format:"
+ "\n{invocation_format}\\\\n**{system_prompt}**"
+)
-
-
-
-
+class DataVisualizer(Agent):
+ def __init__(self, model_path, research_prompt, chart_prompt, finish_pattern="Final Answer", max_turn=10):
+ super().__init__()
+ llm = GPTAPI(model_path, key='YOUR_OPENAI_API_KEY', retry=5, max_new_tokens=1024, stop_words=["```\n"])
+ interpreter, browser = IPythonInterpreter(), WebBrowser("BingSearch", api_key="YOUR_BING_API_KEY")
+ self.researcher = Agent(
+ llm,
+ TOOL_TEMPLATE.format(
+ finish_pattern=finish_pattern,
+ tool_description=get_plugin_prompt(browser),
+ invocation_format='```json\n{"name": {{tool name}}, "parameters": {{keyword arguments}}}\n```\n',
+ system_prompt=research_prompt,
+ ),
+ output_format=ToolParser(
+ "browser",
+ begin="```json\n",
+ end="\n```\n",
+ validate=lambda x: json.loads(x.rstrip('`')),
+ ),
+ aggregator=InternLMToolAggregator(),
+ name="researcher",
+ )
+ self.charter = Agent(
+ llm,
+ TOOL_TEMPLATE.format(
+ finish_pattern=finish_pattern,
+ tool_description=interpreter.name,
+ invocation_format='```python\n{{code}}\n```\n',
+ system_prompt=chart_prompt,
+ ),
+ output_format=ToolParser(
+ "interpreter",
+ begin="```python\n",
+ end="\n```\n",
+ validate=lambda x: x.rstrip('`'),
+ ),
+ aggregator=InternLMToolAggregator(),
+ name="charter",
+ )
+ self.executor = ActionExecutor([interpreter, browser], hooks=[InternLMActionProcessor()])
+ self.finish_pattern = finish_pattern
+ self.max_turn = max_turn
-### All Thanks To Our Contributors:
+ def forward(self, message, session_id=0):
+ for _ in range(self.max_turn):
+ message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"]) # override llm stop words
+ while message.formatted["tool_type"]:
+ message = self.executor(message, session_id=session_id)
+ message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"])
+ if self.finish_pattern in message.content:
+ return message
+            message = self.charter(message, session_id=session_id)
+ while message.formatted["tool_type"]:
+ message = self.executor(message, session_id=session_id)
+ message = self.charter(message, session_id=session_id)
+ if self.finish_pattern in message.content:
+ return message
+ return message
-
-
-
+visualizer = DataVisualizer(
+ "gpt-4o-2024-05-13",
+ research_prompt="You should provide accurate data for the chart generator to use.",
+    chart_prompt="Any charts you display will be visible to the user.",
+)
+user_msg = AgentMessage(
+ sender='user',
+ content="Fetch the China's GDP over the past 5 years, then draw a line graph of it. Once you code it up, finish.")
+bot_msg = visualizer(user_msg)
+print(bot_msg.content)
+json.dump(visualizer.state_dict(), open('visualizer.json', 'w'), ensure_ascii=False, indent=4)
+````
## Citation
diff --git a/docs/en/get_started/overview.md b/docs/en/get_started/overview.md
deleted file mode 100644
index 5be46835..00000000
--- a/docs/en/get_started/overview.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Overview
-
-This chapter introduces you to the framework of Lagent, and provides links to detailed tutorials about Lagent.
-
-## What is Lagent
-
-Lagent is an open source LLM agent framework, which enables people to efficiently turn a large language model to agent. It also provides some typical tools to enlighten the ability of LLM, and the whole framework is shown below:
-
-
-
-Lagent consists of 3 main parts, agents, llms, and actions.
-
-- **agents** provides agent implementation, such as ReAct, AutoGPT.
-- **llms** supports various large language models, including open-sourced models (Llama-2, InternLM) through HuggingFace models or closed-source models like GPT3.5/4.
-- **actions** contains a series of actions, as well as an action executor to manage all actions.
-
-## How to Use
-
-Here is a detailed step-by-step guide to learn more about Lagent:
-
-1. For installation instructions, please see [README](https://github.com/InternLM/lagent/blob/main/README.md).
-
-2. We provide several examples to build agents with Lagent in [examples](https://github.com/InternLM/lagent/tree/main/examples) by simply run `python examples/react_example.py`.
diff --git a/docs/en/get_started/quickstart.md b/docs/en/get_started/quickstart.md
index e80ae492..9d2c50b7 100644
--- a/docs/en/get_started/quickstart.md
+++ b/docs/en/get_started/quickstart.md
@@ -1,89 +1,488 @@
-# Quickstart
+# How to Use Lagent
-Using Lagent, you can easily build agents with just a few lines of code.
+Lagent v1.0 is inspired by the design philosophy of PyTorch. We expect the analogy to neural network layers to make the workflow clearer and more intuitive, so that users only need to focus on creating layers and defining the message passing between them in a Pythonic way. This is a simple tutorial to get you quickly started with building multi-agent applications.
-## Run a ReWOO agent with GPT-3.5
+## Core Ideas
-Below is an example of running ReWOO with GPT-3.5
+### Models as Agents
+
+Agents use `AgentMessage` for communication.
```python
-# Import necessary modules and classes from the "lagent" library.
-from lagent.agents import ReWOO
-from lagent.actions import ActionExecutor, GoogleSearch
-from lagent.llms import GPTAPI
+from typing import Dict, List
+from lagent.agents import Agent
+from lagent.schema import AgentMessage
+from lagent.llms import VllmModel, INTERNLM2_META
-# Initialize the Language Model (llm) and provide your API key.
-llm = GPTAPI(model_type='gpt-3.5-turbo', key=['Your OPENAI_API_KEY'])
+llm = VllmModel(
+ path='Qwen/Qwen2-7B-Instruct',
+ meta_template=INTERNLM2_META,
+ tp=1,
+ top_k=1,
+ temperature=1.0,
+ stop_words=['<|im_end|>'],
+ max_new_tokens=1024,
+)
+system_prompt = '你的回答只能从“典”、“孝”、“急”三个字中选一个。'
+agent = Agent(llm, system_prompt)
-# Initialize the Google Search tool and provide your API key.
-search_tool = GoogleSearch(api_key='Your SERPER_API_KEY')
+user_msg = AgentMessage(sender='user', content='今天天气情况')
+bot_msg = agent(user_msg)
+print(bot_msg)
+```
-# Create a chatbot by configuring the ReWOO agent.
-chatbot = ReWOO(
- llm=llm, # Provide the Language Model instance.
- action_executor=ActionExecutor(
- actions=[search_tool] # Specify the actions the chatbot can perform.
- ),
-)
+```
+content='急' sender='Agent' formatted=None extra_info=None type=None receiver=None stream_state=<AgentStatusCode.END: 0>
+```
+
+### Memory as State
-# Ask the chatbot a question and store the response.
-response = chatbot.chat('What profession does Nicholas Ray and Elia Kazan have in common')
+Both input and output messages will be added to the memory of `Agent` in each forward pass. This is performed in `__call__` rather than `forward`. See the following pseudocode:
-# Print the chatbot's response.
-print(response.response) # Output the response generated by the chatbot.
+```python
+ def __call__(self, *message):
+ message = pre_hooks(message)
+ add_memory(message)
+ message = self.forward(*message)
+ add_memory(message)
+ message = post_hooks(message)
+ return message
```
+Inspect the memory in two ways
+
```python
->>> Film director.
+memory: List[AgentMessage] = agent.memory.get_memory()
+print(memory)
+print('-' * 120)
+dumped_memory: Dict[str, List[dict]] = agent.state_dict()
+print(dumped_memory['memory'])
```
-## Run a ReAct agent with InternLM
+```
+[AgentMessage(content='今天天气情况', sender='user', formatted=None, extra_info=None, type=None, receiver=None, stream_state=<AgentStatusCode.END: 0>), AgentMessage(content='急', sender='Agent', formatted=None, extra_info=None, type=None, receiver=None, stream_state=<AgentStatusCode.END: 0>)]
+------------------------------------------------------------------------------------------------------------------------
+[{'content': '今天天气情况', 'sender': 'user', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': <AgentStatusCode.END: 0>}, {'content': '急', 'sender': 'Agent', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': <AgentStatusCode.END: 0>}]
+```
-NOTE: If you want to run a HuggingFace model, please run `pip install -e .[all]` first.
+Clear the memory of this session (`session_id=0` by default):
```python
-# Import necessary modules and classes from the "lagent" library.
-from lagent.agents import ReAct
-from lagent.actions import ActionExecutor, GoogleSearch, PythonInterpreter
-from lagent.llms import HFTransformer
+agent.memory.reset()
+```
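+
+Memories are maintained per session. As a sketch, inspect another session's memory (assuming `state_dict` accepts a `session_id`, as in the multi-agent example below):
+
+```python
+# assumption: `state_dict(session_id=...)` works as in the multi-agent example below
+agent(AgentMessage(sender='user', content='今天天气情况'), session_id=1)
+print(agent.state_dict(session_id=1)['memory'])  # only messages of session 1
+```
+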
-from lagent.llms.meta_template import INTERNLM2_META as META
+### Custom Message Aggregation
-# Initialize the HFTransformer-based Language Model (llm) and
-# provide the model name.
-llm = HFTransformer(path='internlm/internlm2-chat-7b', meta_template=META)
+`DefaultAggregator` is called under the hood to assemble `AgentMessage`s and convert them to the OpenAI message format.
+
+```python
+ def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]:
+ formatted_messages = self.aggregator.aggregate(
+ self.memory.get(session_id),
+ self.name,
+ self.output_format,
+ self.template,
+ )
+ llm_response = self.llm.chat(formatted_messages, **kwargs)
+ ...
+```
+
+Implement a simple aggregator that accepts few-shot examples:
+
+```python
+from typing import List, Union
+from lagent.memory import Memory
+from lagent.prompts import StrParser
+from lagent.agents.aggregator import DefaultAggregator
-# Initialize the Google Search tool and provide your API key.
-search_tool = GoogleSearch(api_key='Your SERPER_API_KEY')
+class FewshotAggregator(DefaultAggregator):
+ def __init__(self, few_shot: List[dict] = None):
+ self.few_shot = few_shot or []
-# Initialize the Python Interpreter tool.
-python_interpreter = PythonInterpreter()
+ def aggregate(self,
+ messages: Memory,
+ name: str,
+ parser: StrParser = None,
+ system_instruction: Union[str, dict, List[dict]] = None) -> List[dict]:
+ _message = []
+ if system_instruction:
+ _message.extend(
+ self.aggregate_system_intruction(system_instruction))
+ _message.extend(self.few_shot)
+ messages = messages.get_memory()
+ for message in messages:
+ if message.sender == name:
+ _message.append(
+ dict(role='assistant', content=str(message.content)))
+ else:
+ user_message = message.content
+ if len(_message) > 0 and _message[-1]['role'] == 'user':
+ _message[-1]['content'] += user_message
+ else:
+ _message.append(dict(role='user', content=user_message))
+ return _message
-# Create a chatbot by configuring the ReAct agent.
-# Specify the actions the chatbot can perform.
-chatbot = ReAct(
- llm=llm, # Provide the Language Model instance.
- action_executor=ActionExecutor(
- actions=[search_tool, python_interpreter]),
+agent = Agent(
+ llm,
+ aggregator=FewshotAggregator(
+ [
+ {"role": "user", "content": "今天天气"},
+ {"role": "assistant", "content": "【晴】"},
+ ]
+ )
)
-# Ask the chatbot a mathematical question in LaTeX format.
-response = chatbot.chat('若$z=-1+\sqrt{3}i$,则$\frac{z}{{z\overline{z}-1}}=\left(\ \ \right)$')
+user_msg = AgentMessage(sender='user', content='昨天天气')
+bot_msg = agent(user_msg)
+print(bot_msg)
+```
-# Print the chatbot's response.
-print(response.response) # Output the response generated by the chatbot.
```
+content='【多云转晴,夜间有轻微降温】' sender='Agent' formatted=None extra_info=None type=None receiver=None stream_state=<AgentStatusCode.END: 0>
+```
+
+### Flexible Response Formatting
+
+In `AgentMessage`, `formatted` is reserved to store information parsed by `output_format` from the model output.
```python
->>> $-\\frac{1}{3}+\\frac{{\\sqrt{3}}}{3}i$
+ def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]:
+ ...
+ llm_response = self.llm.chat(formatted_messages, **kwargs)
+ if self.output_format:
+ formatted_messages = self.output_format.parse_response(llm_response)
+ return AgentMessage(
+ sender=self.name,
+ content=llm_response,
+ formatted=formatted_messages,
+ )
+ ...
```
-## Run ReAct Web Demo
+Use a tool parser as follows
+
+````python
+from lagent.prompts.parsers import ToolParser
+
+system_prompt = "逐步分析并编写Python代码解决以下问题。"
+parser = ToolParser(tool_type='code interpreter', begin='```python\n', end='\n```\n')
+llm.gen_params['stop_words'].append('\n```\n')
+agent = Agent(llm, system_prompt, output_format=parser)
+
+user_msg = AgentMessage(
+ sender='user',
+ content='Marie is thinking of a multiple of 63, while Jay is thinking of a '
+ 'factor of 63. They happen to be thinking of the same number. There are '
+ 'two possibilities for the number that each of them is thinking of, one '
+ 'positive and one negative. Find the product of these two numbers.')
+bot_msg = agent(user_msg)
+print(bot_msg.model_dump_json(indent=4))
+````
+
+````
+{
+ "content": "首先,我们需要找出63的所有正因数和负因数。63的正因数可以通过分解63的质因数来找出,即\\(63 = 3^2 \\times 7\\)。因此,63的正因数包括1, 3, 7, 9, 21, 和 63。对于负因数,我们只需将上述正因数乘以-1。\n\n接下来,我们需要找出与63的正因数相乘的结果为63的数,以及与63的负因数相乘的结果为63的数。这可以通过将63除以每个正因数和负因数来实现。\n\n最后,我们将找到的两个数相乘得到最终答案。\n\n下面是Python代码实现:\n\n```python\ndef find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)",
+ "sender": "Agent",
+ "formatted": {
+ "tool_type": "code interpreter",
+ "thought": "首先,我们需要找出63的所有正因数和负因数。63的正因数可以通过分解63的质因数来找出,即\\(63 = 3^2 \\times 7\\)。因此,63的正因数包括1, 3, 7, 9, 21, 和 63。对于负因数,我们只需将上述正因数乘以-1。\n\n接下来,我们需要找出与63的正因数相乘的结果为63的数,以及与63的负因数相乘的结果为63的数。这可以通过将63除以每个正因数和负因数来实现。\n\n最后,我们将找到的两个数相乘得到最终答案。\n\n下面是Python代码实现:\n\n",
+ "action": "def find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)",
+ "status": 1
+ },
+ "extra_info": null,
+ "type": null,
+ "receiver": null,
+ "stream_state": 0
+}
+````
+
+### Consistency of Tool Calling
+
+`ActionExecutor` uses the same communication data structure as `Agent`, but requires the content of the input `AgentMessage` to be a dict containing:
+
+- `name`: tool name, e.g. `'IPythonInterpreter'`, `'WebBrowser.search'`.
+- `parameters`: keyword arguments of the tool API, e.g. `{'command': 'import math;math.sqrt(2)'}`, `{'query': ['recent progress in AI']}`.
+
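+For illustration, here is a minimal sketch that drives the executor directly with such a message; without hooks, the content of the returned message is the raw `ActionReturn`:
+
+```python
+from lagent.actions import ActionExecutor, IPythonInteractive
+from lagent.schema import AgentMessage
+
+executor = ActionExecutor(actions=[IPythonInteractive()])
+tool_msg = AgentMessage(
+    sender='Agent',
+    content=dict(name='IPythonInteractive', parameters={'command': 'import math;math.sqrt(2)'}),
+)
+result_msg = executor(tool_msg)
+print(result_msg.content)  # an `ActionReturn` object; the hooks below convert it to plain text
+```
+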
+You can register custom hooks for message conversion.
```python
-# You need to install streamlit first
-# pip install streamlit
-streamlit run examples/react_web_demo.py
+from lagent.hooks import Hook
+from lagent.schema import ActionReturn, ActionStatusCode, AgentMessage
+from lagent.actions import ActionExecutor, IPythonInteractive
+
+class CodeProcessor(Hook):
+ def before_action(self, executor, message, session_id):
+ message = message.copy(deep=True)
+ message.content = dict(
+ name='IPythonInteractive', parameters={'command': message.formatted['action']}
+ )
+ return message
+
+ def after_action(self, executor, message, session_id):
+ action_return = message.content
+ if isinstance(action_return, ActionReturn):
+ if action_return.state == ActionStatusCode.SUCCESS:
+ response = action_return.format_result()
+ else:
+ response = action_return.errmsg
+ else:
+ response = action_return
+ message.content = response
+ return message
+
+executor = ActionExecutor(actions=[IPythonInteractive()], hooks=[CodeProcessor()])
+bot_msg = AgentMessage(
+ sender='Agent',
+ content='首先,我们需要...',
+ formatted={
+ 'tool_type': 'code interpreter',
+ 'thought': '首先,我们需要...',
+ 'action': 'def find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)',
+ 'status': 1
+ })
+executor_msg = executor(bot_msg)
+print(executor_msg)
```
-Then you can chat through the UI shown as below
-
+```
+content='3969.0' sender='ActionExecutor' formatted=None extra_info=None type=None receiver=None stream_state=<AgentStatusCode.END: 0>
+```
+
+**For convenience, Lagent provides `InternLMActionProcessor`, which is adapted to messages formatted by `ToolParser` as mentioned above.**
+
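+For illustration, a minimal sketch of plugging it into an executor, mirroring the multi-agent workflow below:
+
+```python
+from lagent.actions import ActionExecutor, IPythonInterpreter
+from lagent.hooks import InternLMActionProcessor
+
+# mirrors the hook usage in the multi-agent workflow below
+executor = ActionExecutor(actions=[IPythonInterpreter()], hooks=[InternLMActionProcessor()])
+```
+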
+### Dual Interfaces
+
+Lagent adopts a dual-interface design: almost every component (LLMs, actions, action executors, etc.) has a corresponding asynchronous variant, identified by the prefix 'Async'. It is recommended to use synchronous agents for debugging and asynchronous ones for large-scale inference to make the most of idle CPU and GPU resources.
+
+However, ensure the internal consistency of agents: asynchronous agents should be equipped with asynchronous LLMs and asynchronous action executors that drive asynchronous tools.
+
+```python
+from lagent.llms import VllmModel, AsyncVllmModel, LMDeployPipeline, AsyncLMDeployPipeline
+from lagent.actions import ActionExecutor, AsyncActionExecutor, WebBrowser, AsyncWebBrowser
+from lagent.agents import Agent, AsyncAgent, AgentForInternLM, AsyncAgentForInternLM
+```
+
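+Below is a minimal asynchronous counterpart of the very first example; this is a sketch that assumes `AsyncVllmModel` accepts the same constructor arguments as `VllmModel`:
+
+```python
+import asyncio
+
+from lagent.agents import AsyncAgent
+from lagent.llms import AsyncVllmModel, INTERNLM2_META
+from lagent.schema import AgentMessage
+
+# assumption: AsyncVllmModel shares VllmModel's constructor arguments
+llm = AsyncVllmModel(
+    path='Qwen/Qwen2-7B-Instruct',
+    meta_template=INTERNLM2_META,
+    tp=1,
+    top_k=1,
+    temperature=1.0,
+    stop_words=['<|im_end|>'],
+    max_new_tokens=1024,
+)
+agent = AsyncAgent(llm, '你的回答只能从“典”、“孝”、“急”三个字中选一个。')
+user_msg = AgentMessage(sender='user', content='今天天气情况')
+bot_msg = asyncio.get_event_loop().run_until_complete(agent(user_msg))
+print(bot_msg)
+```
+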
+______________________________________________________________________
+
+## Practice
+
+- **Implement `forward` rather than `__call__` in subclasses unless necessary.**
+- **Always pass the `session_id` argument explicitly; it isolates memory, LLM requests, and tool invocations (e.g. maintaining multiple independent IPython environments) under concurrency.**
+
+### Single Agent
+
+A math agent that solves problems by programming:
+
+````python
+from lagent.agents.aggregator import InternLMToolAggregator
+
+class Coder(Agent):
+ def __init__(self, model_path, system_prompt, max_turn=3):
+ super().__init__()
+ llm = VllmModel(
+ path=model_path,
+ meta_template=INTERNLM2_META,
+ tp=1,
+ top_k=1,
+ temperature=1.0,
+ stop_words=['\n```\n', '<|im_end|>'],
+ max_new_tokens=1024,
+ )
+ self.agent = Agent(
+ llm,
+ system_prompt,
+ output_format=ToolParser(
+ tool_type='code interpreter', begin='```python\n', end='\n```\n'
+ ),
+ # `InternLMToolAggregator` is adapted to `ToolParser` for aggregating
+ # messages with tool invocations and execution results
+ aggregator=InternLMToolAggregator(),
+ )
+ self.executor = ActionExecutor([IPythonInteractive()], hooks=[CodeProcessor()])
+ self.max_turn = max_turn
+
+ def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
+ for _ in range(self.max_turn):
+ message = self.agent(message, session_id=session_id)
+ if message.formatted['tool_type'] is None:
+ return message
+ message = self.executor(message, session_id=session_id)
+ return message
+
+coder = Coder('Qwen/Qwen2-7B-Instruct', 'Solve the problem step by step with the assistance of Python code')
+query = AgentMessage(
+ sender='user',
+ content='Find the projection of $\\mathbf{a}$ onto $\\mathbf{b} = '
+ '\\begin{pmatrix} 1 \\\\ -3 \\end{pmatrix}$ if $\\mathbf{a} \\cdot \\mathbf{b} = 2.$'
+)
+answer = coder(query)
+print(answer.content)
+print('-' * 120)
+for msg in coder.state_dict()['agent.memory']:
+ print('*' * 80)
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
+````
+
+### Multiple Agents
+
+Asynchronous blogging agents that improve writing quality by self-refinement ([original AutoGen example](https://microsoft.github.io/autogen/0.2/docs/topics/prompting-and-reasoning/reflection/))
+
+```python
+import asyncio
+import os
+from lagent.llms import AsyncGPTAPI
+from lagent.agents import AsyncAgent
+os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY'
+
+class PrefixedMessageHook(Hook):
+ def __init__(self, prefix: str, senders: list = None):
+ self.prefix = prefix
+ self.senders = senders or []
+
+ def before_agent(self, agent, messages, session_id):
+ for i, message in enumerate(messages):
+ if message.sender in self.senders:
+ message = message.copy(deep=True)
+ message.content = self.prefix + message.content
+ messages[i] = message
+ return messages
+
+class AsyncBlogger(AsyncAgent):
+ def __init__(self, model_path, writer_prompt, critic_prompt, critic_prefix='', max_turn=3):
+ super().__init__()
+ llm = AsyncGPTAPI(model_type=model_path, retry=5, max_new_tokens=2048)
+ self.writer = AsyncAgent(llm, writer_prompt, name='writer')
+ self.critic = AsyncAgent(
+ llm, critic_prompt, name='critic', hooks=[PrefixedMessageHook(critic_prefix, ['writer'])]
+ )
+ self.max_turn = max_turn
+
+ async def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
+ for _ in range(self.max_turn):
+ message = await self.writer(message, session_id=session_id)
+ message = await self.critic(message, session_id=session_id)
+ return await self.writer(message, session_id=session_id)
+
+blogger = AsyncBlogger(
+ 'gpt-4o-2024-05-13',
+ writer_prompt="You are an writing assistant tasked to write engaging blogpost. You try to generate the best blogpost possible for the user's request. "
+ "If the user provides critique, then respond with a revised version of your previous attempts",
+ critic_prompt="Generate critique and recommendations on the writing. Provide detailed recommendations, including requests for length, depth, style, etc..",
+ critic_prefix='Reflect and provide critique on the following writing. \n\n',
+)
+user_prompt = (
+ "Write an engaging blogpost on the recent updates in {topic}. "
+ "The blogpost should be engaging and understandable for general audience. "
+ "Should have more than 3 paragraphes but no longer than 1000 words.")
+bot_msgs = asyncio.get_event_loop().run_until_complete(
+ asyncio.gather(
+ *[
+ blogger(AgentMessage(sender='user', content=user_prompt.format(topic=topic)), session_id=i)
+ for i, topic in enumerate(['AI', 'Biotechnology', 'New Energy', 'Video Games', 'Pop Music'])
+ ]
+ )
+)
+print(bot_msgs[0].content)
+print('-' * 120)
+for msg in blogger.state_dict(session_id=0)['writer.memory']:
+ print('*' * 80)
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
+print('-' * 120)
+for msg in blogger.state_dict(session_id=0)['critic.memory']:
+ print('*' * 80)
+ print(f'{msg["sender"]}:\n\n{msg["content"]}')
+```
+
+A multi-agent workflow that performs information retrieval, data collection and chart plotting ([original LangGraph example](https://vijaykumarkartha.medium.com/multiple-ai-agents-creating-multi-agent-workflows-using-langgraph-and-langchain-0587406ec4e6))
+
+
+

+
+
+````python
+import json
+from lagent.actions import IPythonInterpreter, WebBrowser, ActionExecutor
+from lagent.agents.stream import get_plugin_prompt
+from lagent.llms import GPTAPI
+from lagent.hooks import InternLMActionProcessor
+
+TOOL_TEMPLATE = (
+ "You are a helpful AI assistant, collaborating with other assistants. Use the provided tools to progress"
+ " towards answering the question. If you are unable to fully answer, that's OK, another assistant with"
+ " different tools will help where you left off. Execute what you can to make progress. If you or any of"
+ " the other assistants have the final answer or deliverable, prefix your response with {finish_pattern}"
+ " so the team knows to stop. You have access to the following tools:\n{tool_description}\nPlease provide"
+ " your thought process when you need to use a tool, followed by the call statement in this format:"
+ "\n{invocation_format}\\\\n**{system_prompt}**"
+)
+
+class DataVisualizer(Agent):
+ def __init__(self, model_path, research_prompt, chart_prompt, finish_pattern="Final Answer", max_turn=10):
+ super().__init__()
+ llm = GPTAPI(model_path, key='YOUR_OPENAI_API_KEY', retry=5, max_new_tokens=1024, stop_words=["```\n"])
+ interpreter, browser = IPythonInterpreter(), WebBrowser("BingSearch", api_key="YOUR_BING_API_KEY")
+ self.researcher = Agent(
+ llm,
+ TOOL_TEMPLATE.format(
+ finish_pattern=finish_pattern,
+ tool_description=get_plugin_prompt(browser),
+ invocation_format='```json\n{"name": {{tool name}}, "parameters": {{keyword arguments}}}\n```\n',
+ system_prompt=research_prompt,
+ ),
+ output_format=ToolParser(
+ "browser",
+ begin="```json\n",
+ end="\n```\n",
+ validate=lambda x: json.loads(x.rstrip('`')),
+ ),
+ aggregator=InternLMToolAggregator(),
+ name="researcher",
+ )
+ self.charter = Agent(
+ llm,
+ TOOL_TEMPLATE.format(
+ finish_pattern=finish_pattern,
+ tool_description=interpreter.name,
+ invocation_format='```python\n{{code}}\n```\n',
+ system_prompt=chart_prompt,
+ ),
+ output_format=ToolParser(
+ "interpreter",
+ begin="```python\n",
+ end="\n```\n",
+ validate=lambda x: x.rstrip('`'),
+ ),
+ aggregator=InternLMToolAggregator(),
+ name="charter",
+ )
+ self.executor = ActionExecutor([interpreter, browser], hooks=[InternLMActionProcessor()])
+ self.finish_pattern = finish_pattern
+ self.max_turn = max_turn
+
+ def forward(self, message, session_id=0):
+ for _ in range(self.max_turn):
+ message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"]) # override llm stop words
+ while message.formatted["tool_type"]:
+ message = self.executor(message, session_id=session_id)
+ message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"])
+ if self.finish_pattern in message.content:
+ return message
+            message = self.charter(message, session_id=session_id)
+ while message.formatted["tool_type"]:
+ message = self.executor(message, session_id=session_id)
+ message = self.charter(message, session_id=session_id)
+ if self.finish_pattern in message.content:
+ return message
+ return message
+
+visualizer = DataVisualizer(
+ "gpt-4o-2024-05-13",
+ research_prompt="You should provide accurate data for the chart generator to use.",
+    chart_prompt="Any charts you display will be visible to the user.",
+)
+user_msg = AgentMessage(
+ sender='user',
+ content="Fetch the China's GDP over the past 5 years, then draw a line graph of it. Once you code it up, finish.")
+bot_msg = visualizer(user_msg)
+print(bot_msg.content)
+json.dump(visualizer.state_dict(), open('visualizer.json', 'w'), ensure_ascii=False, indent=4)
+````
diff --git a/docs/en/index.rst b/docs/en/index.rst
index f74c594d..f51356c1 100644
--- a/docs/en/index.rst
+++ b/docs/en/index.rst
@@ -7,7 +7,6 @@ You can switch between English and Chinese in the lower-left corner of the layou
:maxdepth: 2
:caption: Get Started
- get_started/overview.md
get_started/install.md
get_started/quickstart.md
diff --git a/docs/zh_cn/get_started/overview.md b/docs/zh_cn/get_started/overview.md
deleted file mode 100644
index 9f76e45e..00000000
--- a/docs/zh_cn/get_started/overview.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# 总览
-
-本章节将介绍 Lagent 的架构,并提供 Lagent 详细教程的链接。
-
-## Lagent 是什么
-
-Lagent 是一个开源的 LLM 智能体框架,允许使用者快速将一个大语言模型转换成智能体,并提供一些典型工具来激发大语言模型的潜能。Lagent 框架图如下:
-
-
-
-Lagent 包含三个主要模块:agents,llms 和 actions。
-
-- **agents** 实现了多种智能体,如 ReAct,AutoGPT。
-- **llms** 支持多种大语言模型,包括在 HuggingFace 上托管的开源模型(Llama-2, InternLM)及 GPT3.5/4 等闭源模型。
-- **actions** 包含一系列工具,并提供工具执行器来统一管理。
-
-## 如何使用
-
-以下是帮助您了解关于 Lagent 更多信息的详细教程:
-
-1. 安装请参考 [README](https://github.com/InternLM/lagent/blob/main/README.md).
-
-2. 一些构建智能体的实例 [examples](https://github.com/InternLM/lagent/tree/main/examples),直接运行脚本即可,如 `python examples/react_example.py`.
diff --git a/docs/zh_cn/get_started/quickstart.md b/docs/zh_cn/get_started/quickstart.md
deleted file mode 100644
index e9a6f24b..00000000
--- a/docs/zh_cn/get_started/quickstart.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# 快速上手
-
-借助 Lagent 仅需几行代码就能构建大语言模型智能体。
-
-## GPT-3.5 驱动的 ReWOO 智能体
-
-下面是使用 GPT-3.5 运行 ReWOO 的示例
-
-```python
-# 从 Lagent 导入必要的模块和类
-from lagent.agents import ReWOO
-from lagent.actions import ActionExecutor, GoogleSearch
-from lagent.llms import GPTAPI
-
-# 初始化 LLM,你可能需要提供 API 密钥
-llm = GPTAPI(model_type='gpt-3.5-turbo', key=['Your OPENAI_API_KEY'])
-
-# 初始化 Google 搜索工具,你可能需要提供 API 密钥
-search_tool = GoogleSearch(api_key='Your SERPER_API_KEY')
-
-# 配置 ReWOO 智能体,创建聊天机器人
-chatbot = ReWOO(
- llm=llm, # 大语言模型实例
- action_executor=ActionExecutor(
- actions=[search_tool] # 指定智能体可以调用的工具
- ),
-)
-
-# 询问问题并获取回复
-response = chatbot.chat('What profession does Nicholas Ray and Elia Kazan have in common')
-
-# 打印回复
-print(response.response)
-```
-
-```python
->>> Film director.
-```
-
-## InterLM 驱动的 ReAct 智能体
-
-注意,如果你想使用 HuggingFace 模型,请先运行 `pip install -e .[all]`
-
-```python
-# 从 Lagent 导入必要的模块和类
-from lagent.agents import ReAct
-from lagent.actions import ActionExecutor, GoogleSearch, PythonInterpreter
-from lagent.llms import HFTransformer
-
-from lagent.llms.meta_template import INTERNLM2_META as META
-
-# 初始化 HFTransformer 模型
-llm = HFTransformer(path='internlm/internlm2-chat-7b', meta_template=META)
-
-# 初始化 Goolge 搜索工具,你可能需要提供 API 密钥
-search_tool = GoogleSearch(api_key='Your SERPER_API_KEY')
-
-# 初始化 Python 代码解释其
-python_interpreter = PythonInterpreter()
-
-# 配置 ReAct 智能体,创建聊天机器人
-chatbot = ReAct(
- llm=llm, # 大语言模型实例
- action_executor=ActionExecutor(
- actions=[search_tool, python_interpreter]), # 指定智能体可以调用的工具
-)
-# 询问LaTeX格式的数学问题
-response = chatbot.chat('若$z=-1+\sqrt{3}i$,则$\frac{z}{{z\overline{z}-1}}=\left(\ \ \right)$')
-
-# 打印回复
-print(response.response)
-```
-
-```python
->>> $-\\frac{1}{3}+\\frac{{\\sqrt{3}}}{3}i$
-```
-
-## 启动 ReAct 网页 App
-
-```python
-# 你需要先安装 streamlit
-# pip install streamlit
-streamlit run examples/react_web_demo.py
-```
-
-然后你可以通过下图所示UI界面进行对话
-
diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst
index 3089e209..c62bff68 100644
--- a/docs/zh_cn/index.rst
+++ b/docs/zh_cn/index.rst
@@ -7,9 +7,7 @@
:maxdepth: 2
:caption: 新手入门
- get_started/overview.md
get_started/install.md
- get_started/quickstart.md
.. toctree::
:maxdepth: 2
diff --git a/examples/internlm2_agent_cli_demo.py b/examples/internlm2_agent_cli_demo.py
deleted file mode 100644
index 4774fd4f..00000000
--- a/examples/internlm2_agent_cli_demo.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from argparse import ArgumentParser
-
-from lagent.actions import ActionExecutor, ArxivSearch, IPythonInterpreter
-from lagent.agents.internlm2_agent import INTERPRETER_CN, META_CN, PLUGIN_CN, Internlm2Agent, Internlm2Protocol
-from lagent.llms import HFTransformer
-from lagent.llms.meta_template import INTERNLM2_META as META
-from lagent.schema import AgentStatusCode
-
-
-def parse_args():
- parser = ArgumentParser(description='chatbot')
- parser.add_argument(
- '--path',
- type=str,
- default='internlm/internlm2-chat-20b',
- help='The path to the model')
- args = parser.parse_args()
- return args
-
-
-def main():
- args = parse_args()
- # Initialize the HFTransformer-based Language Model (llm)
- model = HFTransformer(
- path=args.path,
- meta_template=META,
- max_new_tokens=1024,
- top_p=0.8,
- top_k=None,
- temperature=0.1,
- repetition_penalty=1.0,
- stop_words=['<|im_end|>'])
- plugin_executor = ActionExecutor(actions=[ArxivSearch()]) # noqa: F841
- interpreter_executor = ActionExecutor(actions=[IPythonInterpreter()])
-
- chatbot = Internlm2Agent(
- llm=model,
- plugin_executor=None,
- interpreter_executor=interpreter_executor,
- protocol=Internlm2Protocol(
- meta_prompt=META_CN,
- interpreter_prompt=INTERPRETER_CN,
- plugin_prompt=PLUGIN_CN,
- tool=dict(
- begin='{start_token}{name}\n',
- start_token='<|action_start|>',
- name_map=dict(
- plugin='<|plugin|>', interpreter='<|interpreter|>'),
- belong='assistant',
- end='<|action_end|>\n',
- ),
- ),
- )
-
- def input_prompt():
- print('\ndouble enter to end input >>> ', end='', flush=True)
- sentinel = '' # ends when this string is seen
- return '\n'.join(iter(input, sentinel))
-
- history = []
- while True:
- try:
- prompt = input_prompt()
- except UnicodeDecodeError:
- print('UnicodeDecodeError')
- continue
- if prompt == 'exit':
- exit(0)
- history.append(dict(role='user', content=prompt))
- print('\nInternLm2:', end='')
- current_length = 0
- last_status = None
- for agent_return in chatbot.stream_chat(history):
- status = agent_return.state
- if status not in [
- AgentStatusCode.STREAM_ING, AgentStatusCode.CODING,
- AgentStatusCode.PLUGIN_START
- ]:
- continue
- if status != last_status:
- current_length = 0
- print('')
- if isinstance(agent_return.response, dict):
- action = f"\n\n {agent_return.response['name']}: \n\n"
- action_input = agent_return.response['parameters']
- if agent_return.response['name'] == 'IPythonInterpreter':
- action_input = action_input['command']
- response = action + action_input
- else:
- response = agent_return.response
- print(response[current_length:], end='', flush=True)
- current_length = len(response)
- last_status = status
- print('')
- history.extend(agent_return.inner_steps)
-
-
-if __name__ == '__main__':
- main()
diff --git a/examples/internlm2_agent_web_demo.py b/examples/internlm2_agent_web_demo.py
deleted file mode 100644
index ed70b840..00000000
--- a/examples/internlm2_agent_web_demo.py
+++ /dev/null
@@ -1,333 +0,0 @@
-import copy
-import hashlib
-import json
-import os
-
-import streamlit as st
-
-from lagent.actions import ActionExecutor, ArxivSearch, IPythonInterpreter
-from lagent.agents.internlm2_agent import INTERPRETER_CN, META_CN, PLUGIN_CN, Internlm2Agent, Internlm2Protocol
-from lagent.llms.lmdeploy_wrapper import LMDeployClient
-from lagent.llms.meta_template import INTERNLM2_META as META
-from lagent.schema import AgentStatusCode
-
-# from streamlit.logger import get_logger
-
-
-class SessionState:
-
- def init_state(self):
- """Initialize session state variables."""
- st.session_state['assistant'] = []
- st.session_state['user'] = []
-
- action_list = [
- ArxivSearch(),
- ]
- st.session_state['plugin_map'] = {
- action.name: action
- for action in action_list
- }
- st.session_state['model_map'] = {}
- st.session_state['model_selected'] = None
- st.session_state['plugin_actions'] = set()
- st.session_state['history'] = []
-
- def clear_state(self):
- """Clear the existing session state."""
- st.session_state['assistant'] = []
- st.session_state['user'] = []
- st.session_state['model_selected'] = None
- st.session_state['file'] = set()
- if 'chatbot' in st.session_state:
- st.session_state['chatbot']._session_history = []
-
-
-class StreamlitUI:
-
- def __init__(self, session_state: SessionState):
- self.init_streamlit()
- self.session_state = session_state
-
- def init_streamlit(self):
- """Initialize Streamlit's UI settings."""
- st.set_page_config(
- layout='wide',
- page_title='lagent-web',
- page_icon='./docs/imgs/lagent_icon.png')
- st.header(':robot_face: :blue[Lagent] Web Demo ', divider='rainbow')
- st.sidebar.title('模型控制')
- st.session_state['file'] = set()
- st.session_state['ip'] = None
-
- def setup_sidebar(self):
- """Setup the sidebar for model and plugin selection."""
- # model_name = st.sidebar.selectbox('模型选择:', options=['internlm'])
- model_name = st.sidebar.text_input('模型名称:', value='internlm2-chat-7b')
- meta_prompt = st.sidebar.text_area('系统提示词', value=META_CN)
- da_prompt = st.sidebar.text_area('数据分析提示词', value=INTERPRETER_CN)
- plugin_prompt = st.sidebar.text_area('插件提示词', value=PLUGIN_CN)
- model_ip = st.sidebar.text_input('模型IP:', value='10.140.0.220:23333')
- if model_name != st.session_state[
- 'model_selected'] or st.session_state['ip'] != model_ip:
- st.session_state['ip'] = model_ip
- model = self.init_model(model_name, model_ip)
- self.session_state.clear_state()
- st.session_state['model_selected'] = model_name
- if 'chatbot' in st.session_state:
- del st.session_state['chatbot']
- else:
- model = st.session_state['model_map'][model_name]
-
- plugin_name = st.sidebar.multiselect(
- '插件选择',
- options=list(st.session_state['plugin_map'].keys()),
- default=[],
- )
- da_flag = st.sidebar.checkbox(
- '数据分析',
- value=False,
- )
- plugin_action = [
- st.session_state['plugin_map'][name] for name in plugin_name
- ]
-
- if 'chatbot' in st.session_state:
- if len(plugin_action) > 0:
- st.session_state['chatbot']._action_executor = ActionExecutor(
- actions=plugin_action)
- else:
- st.session_state['chatbot']._action_executor = None
- if da_flag:
- st.session_state[
- 'chatbot']._interpreter_executor = ActionExecutor(
- actions=[IPythonInterpreter()])
- else:
- st.session_state['chatbot']._interpreter_executor = None
- st.session_state['chatbot']._protocol._meta_template = meta_prompt
- st.session_state['chatbot']._protocol.plugin_prompt = plugin_prompt
- st.session_state[
- 'chatbot']._protocol.interpreter_prompt = da_prompt
- if st.sidebar.button('清空对话', key='clear'):
- self.session_state.clear_state()
- uploaded_file = st.sidebar.file_uploader('上传文件')
-
- return model_name, model, plugin_action, uploaded_file, model_ip
-
- def init_model(self, model_name, ip=None):
- """Initialize the model based on the input model name."""
- model_url = f'http://{ip}'
- st.session_state['model_map'][model_name] = LMDeployClient(
- model_name=model_name,
- url=model_url,
- meta_template=META,
- max_new_tokens=1024,
- top_p=0.8,
- top_k=100,
- temperature=0,
- repetition_penalty=1.0,
- stop_words=['<|im_end|>'])
- return st.session_state['model_map'][model_name]
-
- def initialize_chatbot(self, model, plugin_action):
- """Initialize the chatbot with the given model and plugin actions."""
- return Internlm2Agent(
- llm=model,
- protocol=Internlm2Protocol(
- tool=dict(
- begin='{start_token}{name}\n',
- start_token='<|action_start|>',
- name_map=dict(
- plugin='<|plugin|>', interpreter='<|interpreter|>'),
- belong='assistant',
- end='<|action_end|>\n',
- ), ),
- max_turn=7)
-
- def render_user(self, prompt: str):
- with st.chat_message('user'):
- st.markdown(prompt)
-
- def render_assistant(self, agent_return):
- with st.chat_message('assistant'):
- for action in agent_return.actions:
- if (action) and (action.type != 'FinishAction'):
- self.render_action(action)
- st.markdown(agent_return.response)
-
- def render_plugin_args(self, action):
- action_name = action.type
- args = action.args
- import json
- parameter_dict = dict(name=action_name, parameters=args)
- parameter_str = '```json\n' + json.dumps(
- parameter_dict, indent=4, ensure_ascii=False) + '\n```'
- st.markdown(parameter_str)
-
- def render_interpreter_args(self, action):
- st.info(action.type)
- st.markdown(action.args['text'])
-
- def render_action(self, action):
- st.markdown(action.thought)
- if action.type == 'IPythonInterpreter':
- self.render_interpreter_args(action)
- elif action.type == 'FinishAction':
- pass
- else:
- self.render_plugin_args(action)
- self.render_action_results(action)
-
- def render_action_results(self, action):
- """Render the results of action, including text, images, videos, and
- audios."""
- if (isinstance(action.result, dict)):
- if 'text' in action.result:
- st.markdown('```\n' + action.result['text'] + '\n```')
- if 'image' in action.result:
- # image_path = action.result['image']
- for image_path in action.result['image']:
- image_data = open(image_path, 'rb').read()
- st.image(image_data, caption='Generated Image')
- if 'video' in action.result:
- video_data = action.result['video']
- video_data = open(video_data, 'rb').read()
- st.video(video_data)
- if 'audio' in action.result:
- audio_data = action.result['audio']
- audio_data = open(audio_data, 'rb').read()
- st.audio(audio_data)
- elif isinstance(action.result, list):
- for item in action.result:
- if item['type'] == 'text':
- st.markdown('```\n' + item['content'] + '\n```')
- elif item['type'] == 'image':
- image_data = open(item['content'], 'rb').read()
- st.image(image_data, caption='Generated Image')
- elif item['type'] == 'video':
- video_data = open(item['content'], 'rb').read()
- st.video(video_data)
- elif item['type'] == 'audio':
- audio_data = open(item['content'], 'rb').read()
- st.audio(audio_data)
- if action.errmsg:
- st.error(action.errmsg)
-
-
-def main():
- # logger = get_logger(__name__)
- # Initialize Streamlit UI and setup sidebar
- if 'ui' not in st.session_state:
- session_state = SessionState()
- session_state.init_state()
- st.session_state['ui'] = StreamlitUI(session_state)
-
- else:
- st.set_page_config(
- layout='wide',
- page_title='lagent-web',
- page_icon='./docs/imgs/lagent_icon.png')
- st.header(':robot_face: :blue[Lagent] Web Demo ', divider='rainbow')
- _, model, plugin_action, uploaded_file, _ = st.session_state[
- 'ui'].setup_sidebar()
-
- # Initialize chatbot if it is not already initialized
- # or if the model has changed
- if 'chatbot' not in st.session_state or model != st.session_state[
- 'chatbot']._llm:
- st.session_state['chatbot'] = st.session_state[
- 'ui'].initialize_chatbot(model, plugin_action)
- st.session_state['session_history'] = []
-
- for prompt, agent_return in zip(st.session_state['user'],
- st.session_state['assistant']):
- st.session_state['ui'].render_user(prompt)
- st.session_state['ui'].render_assistant(agent_return)
-
- if user_input := st.chat_input(''):
- with st.container():
- st.session_state['ui'].render_user(user_input)
- st.session_state['user'].append(user_input)
- # Add file uploader to sidebar
- if (uploaded_file
- and uploaded_file.name not in st.session_state['file']):
-
- st.session_state['file'].add(uploaded_file.name)
- file_bytes = uploaded_file.read()
- file_type = uploaded_file.type
- if 'image' in file_type:
- st.image(file_bytes, caption='Uploaded Image')
- elif 'video' in file_type:
- st.video(file_bytes, caption='Uploaded Video')
- elif 'audio' in file_type:
- st.audio(file_bytes, caption='Uploaded Audio')
- # Save the file to a temporary location and get the path
-
- postfix = uploaded_file.name.split('.')[-1]
- # prefix = str(uuid.uuid4())
- prefix = hashlib.md5(file_bytes).hexdigest()
- filename = f'{prefix}.{postfix}'
- file_path = os.path.join(root_dir, filename)
- with open(file_path, 'wb') as tmpfile:
- tmpfile.write(file_bytes)
- file_size = os.stat(file_path).st_size / 1024 / 1024
- file_size = f'{round(file_size, 2)} MB'
- # st.write(f'File saved at: {file_path}')
- user_input = [
- dict(role='user', content=user_input),
- dict(
- role='user',
- content=json.dumps(dict(path=file_path, size=file_size)),
- name='file')
- ]
- if isinstance(user_input, str):
- user_input = [dict(role='user', content=user_input)]
- st.session_state['last_status'] = AgentStatusCode.SESSION_READY
- for agent_return in st.session_state['chatbot'].stream_chat(
- st.session_state['session_history'] + user_input):
- if agent_return.state == AgentStatusCode.PLUGIN_RETURN:
- with st.container():
- st.session_state['ui'].render_plugin_args(
- agent_return.actions[-1])
- st.session_state['ui'].render_action_results(
- agent_return.actions[-1])
- elif agent_return.state == AgentStatusCode.CODE_RETURN:
- with st.container():
- st.session_state['ui'].render_action_results(
- agent_return.actions[-1])
- elif (agent_return.state == AgentStatusCode.STREAM_ING
- or agent_return.state == AgentStatusCode.CODING):
- # st.markdown(agent_return.response)
- # 清除占位符的当前内容,并显示新内容
- with st.container():
- if agent_return.state != st.session_state['last_status']:
- st.session_state['temp'] = ''
- placeholder = st.empty()
- st.session_state['placeholder'] = placeholder
- if isinstance(agent_return.response, dict):
- action = f"\n\n {agent_return.response['name']}: \n\n"
- action_input = agent_return.response['parameters']
- if agent_return.response[
- 'name'] == 'IPythonInterpreter':
- action_input = action_input['command']
- response = action + action_input
- else:
- response = agent_return.response
- st.session_state['temp'] = response
- st.session_state['placeholder'].markdown(
- st.session_state['temp'])
- elif agent_return.state == AgentStatusCode.END:
- st.session_state['session_history'] += (
- user_input + agent_return.inner_steps)
- agent_return = copy.deepcopy(agent_return)
- agent_return.response = st.session_state['temp']
- st.session_state['assistant'].append(
- copy.deepcopy(agent_return))
- st.session_state['last_status'] = agent_return.state
-
-
-if __name__ == '__main__':
- root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- root_dir = os.path.join(root_dir, 'tmp_dir')
- os.makedirs(root_dir, exist_ok=True)
- main()
diff --git a/examples/internlm2_agent_web_demo_hf.py b/examples/internlm2_agent_web_demo_hf.py
deleted file mode 100644
index 79508e22..00000000
--- a/examples/internlm2_agent_web_demo_hf.py
+++ /dev/null
@@ -1,332 +0,0 @@
-import copy
-import hashlib
-import json
-import os
-
-import streamlit as st
-
-from lagent.actions import ActionExecutor, ArxivSearch, IPythonInterpreter
-from lagent.agents.internlm2_agent import INTERPRETER_CN, META_CN, PLUGIN_CN, Internlm2Agent, Internlm2Protocol
-from lagent.llms import HFTransformer
-from lagent.llms.meta_template import INTERNLM2_META as META
-from lagent.schema import AgentStatusCode
-
-# from streamlit.logger import get_logger
-
-
-class SessionState:
-
- def init_state(self):
- """Initialize session state variables."""
- st.session_state['assistant'] = []
- st.session_state['user'] = []
-
- action_list = [
- ArxivSearch(),
- ]
- st.session_state['plugin_map'] = {
- action.name: action
- for action in action_list
- }
- st.session_state['model_map'] = {}
- st.session_state['model_selected'] = None
- st.session_state['plugin_actions'] = set()
- st.session_state['history'] = []
-
- def clear_state(self):
- """Clear the existing session state."""
- st.session_state['assistant'] = []
- st.session_state['user'] = []
- st.session_state['model_selected'] = None
- st.session_state['file'] = set()
- if 'chatbot' in st.session_state:
- st.session_state['chatbot']._session_history = []
-
-
-class StreamlitUI:
-
- def __init__(self, session_state: SessionState):
- self.init_streamlit()
- self.session_state = session_state
-
- def init_streamlit(self):
- """Initialize Streamlit's UI settings."""
- st.set_page_config(
- layout='wide',
- page_title='lagent-web',
- page_icon='./docs/imgs/lagent_icon.png')
- st.header(':robot_face: :blue[Lagent] Web Demo ', divider='rainbow')
- st.sidebar.title('模型控制')
- st.session_state['file'] = set()
- st.session_state['model_path'] = None
-
- def setup_sidebar(self):
- """Setup the sidebar for model and plugin selection."""
- # model_name = st.sidebar.selectbox('模型选择:', options=['internlm'])
- model_name = st.sidebar.text_input('模型名称:', value='internlm2-chat-7b')
- meta_prompt = st.sidebar.text_area('系统提示词', value=META_CN)
- da_prompt = st.sidebar.text_area('数据分析提示词', value=INTERPRETER_CN)
- plugin_prompt = st.sidebar.text_area('插件提示词', value=PLUGIN_CN)
- model_path = st.sidebar.text_input(
- '模型路径:', value='internlm/internlm2-chat-20b')
- if model_name != st.session_state['model_selected'] or st.session_state[
- 'model_path'] != model_path:
- st.session_state['model_path'] = model_path
- model = self.init_model(model_name, model_path)
- self.session_state.clear_state()
- st.session_state['model_selected'] = model_name
- if 'chatbot' in st.session_state:
- del st.session_state['chatbot']
- else:
- model = st.session_state['model_map'][model_name]
-
- plugin_name = st.sidebar.multiselect(
- '插件选择',
- options=list(st.session_state['plugin_map'].keys()),
- default=[],
- )
- da_flag = st.sidebar.checkbox(
- '数据分析',
- value=False,
- )
- plugin_action = [
- st.session_state['plugin_map'][name] for name in plugin_name
- ]
-
- if 'chatbot' in st.session_state:
- if len(plugin_action) > 0:
- st.session_state['chatbot']._action_executor = ActionExecutor(
- actions=plugin_action)
- else:
- st.session_state['chatbot']._action_executor = None
- if da_flag:
- st.session_state[
- 'chatbot']._interpreter_executor = ActionExecutor(
- actions=[IPythonInterpreter()])
- else:
- st.session_state['chatbot']._interpreter_executor = None
- st.session_state['chatbot']._protocol._meta_template = meta_prompt
- st.session_state['chatbot']._protocol.plugin_prompt = plugin_prompt
- st.session_state[
- 'chatbot']._protocol.interpreter_prompt = da_prompt
- if st.sidebar.button('清空对话', key='clear'):
- self.session_state.clear_state()
- uploaded_file = st.sidebar.file_uploader('上传文件')
-
- return model_name, model, plugin_action, uploaded_file, model_path
-
- def init_model(self, model_name, path):
- """Initialize the model based on the input model name."""
- st.session_state['model_map'][model_name] = HFTransformer(
- path=path,
- meta_template=META,
- max_new_tokens=1024,
- top_p=0.8,
- top_k=None,
- temperature=0.1,
- repetition_penalty=1.0,
- stop_words=['<|im_end|>'])
- return st.session_state['model_map'][model_name]
-
- def initialize_chatbot(self, model, plugin_action):
- """Initialize the chatbot with the given model and plugin actions."""
- return Internlm2Agent(
- llm=model,
- protocol=Internlm2Protocol(
- tool=dict(
- begin='{start_token}{name}\n',
- start_token='<|action_start|>',
- name_map=dict(
- plugin='<|plugin|>', interpreter='<|interpreter|>'),
- belong='assistant',
- end='<|action_end|>\n',
- ), ),
- max_turn=7)
-
- def render_user(self, prompt: str):
- with st.chat_message('user'):
- st.markdown(prompt)
-
- def render_assistant(self, agent_return):
- with st.chat_message('assistant'):
- for action in agent_return.actions:
- if action and action.type != 'FinishAction':
- self.render_action(action)
- st.markdown(agent_return.response)
-
- def render_plugin_args(self, action):
- action_name = action.type
- args = action.args
- import json
- parameter_dict = dict(name=action_name, parameters=args)
- parameter_str = '```json\n' + json.dumps(
- parameter_dict, indent=4, ensure_ascii=False) + '\n```'
- st.markdown(parameter_str)
-
- def render_interpreter_args(self, action):
- st.info(action.type)
- st.markdown(action.args['text'])
-
- def render_action(self, action):
- st.markdown(action.thought)
- if action.type == 'IPythonInterpreter':
- self.render_interpreter_args(action)
- elif action.type == 'FinishAction':
- pass
- else:
- self.render_plugin_args(action)
- self.render_action_results(action)
-
- def render_action_results(self, action):
- """Render the results of action, including text, images, videos, and
- audios."""
- if isinstance(action.result, dict):
- if 'text' in action.result:
- st.markdown('```\n' + action.result['text'] + '\n```')
- if 'image' in action.result:
- # image_path = action.result['image']
- for image_path in action.result['image']:
- image_data = open(image_path, 'rb').read()
- st.image(image_data, caption='Generated Image')
- if 'video' in action.result:
- video_data = action.result['video']
- video_data = open(video_data, 'rb').read()
- st.video(video_data)
- if 'audio' in action.result:
- audio_data = action.result['audio']
- audio_data = open(audio_data, 'rb').read()
- st.audio(audio_data)
- elif isinstance(action.result, list):
- for item in action.result:
- if item['type'] == 'text':
- st.markdown('```\n' + item['content'] + '\n```')
- elif item['type'] == 'image':
- image_data = open(item['content'], 'rb').read()
- st.image(image_data, caption='Generated Image')
- elif item['type'] == 'video':
- video_data = open(item['content'], 'rb').read()
- st.video(video_data)
- elif item['type'] == 'audio':
- audio_data = open(item['content'], 'rb').read()
- st.audio(audio_data)
- if action.errmsg:
- st.error(action.errmsg)
-
-
-def main():
- # logger = get_logger(__name__)
- # Initialize Streamlit UI and setup sidebar
- if 'ui' not in st.session_state:
- session_state = SessionState()
- session_state.init_state()
- st.session_state['ui'] = StreamlitUI(session_state)
-
- else:
- st.set_page_config(
- layout='wide',
- page_title='lagent-web',
- page_icon='./docs/imgs/lagent_icon.png')
- st.header(':robot_face: :blue[Lagent] Web Demo ', divider='rainbow')
- _, model, plugin_action, uploaded_file, _ = st.session_state[
- 'ui'].setup_sidebar()
-
- # Initialize chatbot if it is not already initialized
- # or if the model has changed
- if 'chatbot' not in st.session_state or model != st.session_state[
- 'chatbot']._llm:
- st.session_state['chatbot'] = st.session_state[
- 'ui'].initialize_chatbot(model, plugin_action)
- st.session_state['session_history'] = []
-
- for prompt, agent_return in zip(st.session_state['user'],
- st.session_state['assistant']):
- st.session_state['ui'].render_user(prompt)
- st.session_state['ui'].render_assistant(agent_return)
-
- if user_input := st.chat_input(''):
- with st.container():
- st.session_state['ui'].render_user(user_input)
- st.session_state['user'].append(user_input)
- # Add file uploader to sidebar
- if (uploaded_file
- and uploaded_file.name not in st.session_state['file']):
-
- st.session_state['file'].add(uploaded_file.name)
- file_bytes = uploaded_file.read()
- file_type = uploaded_file.type
- if 'image' in file_type:
- st.image(file_bytes, caption='Uploaded Image')
- elif 'video' in file_type:
- st.video(file_bytes)
- elif 'audio' in file_type:
- st.audio(file_bytes)
- # Save the file to a temporary location and get the path
-
- postfix = uploaded_file.name.split('.')[-1]
- # prefix = str(uuid.uuid4())
- prefix = hashlib.md5(file_bytes).hexdigest()
- filename = f'{prefix}.{postfix}'
- file_path = os.path.join(root_dir, filename)
- with open(file_path, 'wb') as tmpfile:
- tmpfile.write(file_bytes)
- file_size = os.stat(file_path).st_size / 1024 / 1024
- file_size = f'{round(file_size, 2)} MB'
- # st.write(f'File saved at: {file_path}')
- user_input = [
- dict(role='user', content=user_input),
- dict(
- role='user',
- content=json.dumps(dict(path=file_path, size=file_size)),
- name='file')
- ]
- if isinstance(user_input, str):
- user_input = [dict(role='user', content=user_input)]
- st.session_state['last_status'] = AgentStatusCode.SESSION_READY
- for agent_return in st.session_state['chatbot'].stream_chat(
- st.session_state['session_history'] + user_input):
- if agent_return.state == AgentStatusCode.PLUGIN_RETURN:
- with st.container():
- st.session_state['ui'].render_plugin_args(
- agent_return.actions[-1])
- st.session_state['ui'].render_action_results(
- agent_return.actions[-1])
- elif agent_return.state == AgentStatusCode.CODE_RETURN:
- with st.container():
- st.session_state['ui'].render_action_results(
- agent_return.actions[-1])
- elif (agent_return.state == AgentStatusCode.STREAM_ING
- or agent_return.state == AgentStatusCode.CODING):
- # st.markdown(agent_return.response)
- # Clear the placeholder's current content and display the new content
- with st.container():
- if agent_return.state != st.session_state['last_status']:
- st.session_state['temp'] = ''
- placeholder = st.empty()
- st.session_state['placeholder'] = placeholder
- if isinstance(agent_return.response, dict):
- action = f"\n\n {agent_return.response['name']}: \n\n"
- action_input = agent_return.response['parameters']
- if agent_return.response[
- 'name'] == 'IPythonInterpreter':
- action_input = action_input['command']
- response = action + action_input
- else:
- response = agent_return.response
- st.session_state['temp'] = response
- st.session_state['placeholder'].markdown(
- st.session_state['temp'])
- elif agent_return.state == AgentStatusCode.END:
- st.session_state['session_history'] += (
- user_input + agent_return.inner_steps)
- agent_return = copy.deepcopy(agent_return)
- agent_return.response = st.session_state['temp']
- st.session_state['assistant'].append(
- copy.deepcopy(agent_return))
- st.session_state['last_status'] = agent_return.state
-
-
-if __name__ == '__main__':
- root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- root_dir = os.path.join(root_dir, 'tmp_dir')
- os.makedirs(root_dir, exist_ok=True)
- main()
diff --git a/lagent/version.py b/lagent/version.py
index 85ee255f..d9c59dd3 100644
--- a/lagent/version.py
+++ b/lagent/version.py
@@ -1,35 +1,27 @@
# Copyright (c) OpenMMLab. All rights reserved.
-__version__ = '0.5.0'
+__version__ = '0.5.0rc1'
-def parse_version_info(version_str: str, length: int = 4) -> tuple:
- """Parse a version string into a tuple.
+def parse_version_info(version_str):
+ """Parse the version information.
Args:
- version_str (str): The version string.
- length (int): The maximum number of version levels. Default: 4.
+ version_str (str): version string like '0.1.0'.
Returns:
- tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
- (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into
- (2, 0, 0, 0, 'rc', 1) (when length is set to 4).
+ tuple: version information contains major, minor, micro version.
"""
- from packaging.version import parse
- version = parse(version_str)
- assert version.release, f'failed to parse version {version_str}'
- release = list(version.release)
- release = release[:length]
- if len(release) < length:
- release = release + [0] * (length - len(release))
- if version.is_prerelease:
- release.extend(list(version.pre)) # type: ignore
- elif version.is_postrelease:
- release.extend(list(version.post)) # type: ignore
- else:
- release.extend([0, 0])
- return tuple(release)
+ version_info = []
+ for x in version_str.split('.'):
+ if x.isdigit():
+ version_info.append(int(x))
+ elif x.find('rc') != -1:
+ patch_version = x.split('rc')
+ version_info.append(int(patch_version[0]))
+ version_info.append(f'rc{patch_version[1]}')
+ return tuple(version_info)
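+
+# e.g. parse_version_info('0.5.0rc1') -> (0, 5, 0, 'rc1')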
-version_info = tuple(int(x) for x in __version__.split('.')[:3])
+version_info = parse_version_info(__version__)
__all__ = ['__version__', 'version_info', 'parse_version_info']
diff --git a/setup.cfg b/setup.cfg
index e40a95da..ec9ff679 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -21,4 +21,4 @@ quiet-level = 3
ignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood,ba,warmup,nam,DOTA,dota,conveyer,astroid
[flake8]
-max-line-length = 200
+max-line-length = 119
diff --git a/usage.md b/usage.md
deleted file mode 100644
index c6ad910a..00000000
--- a/usage.md
+++ /dev/null
@@ -1,489 +0,0 @@
-# How to Use Lagent
-
-Lagent v1.0 is inspired by the design philosophy of PyTorch. We expect that the analogy of neural network layers will make the workflow clearer and more intuitive, so users only need to focus on creating layers and defining message passing between them in a Pythonic way. This is a simple tutorial to get you quickly started with building multi-agent applications.
-
-## Core Ideas
-
-### Models as Agents
-
-Agents use `AgentMessage` for communication.
-
-```python
-from typing import Dict, List
-from lagent.agents import Agent
-from lagent.schema import AgentMessage
-from lagent.llms import VllmModel, INTERNLM2_META
-
-llm = VllmModel(
- path='Qwen/Qwen2-7B-Instruct',
- meta_template=INTERNLM2_META,
- tp=1,
- top_k=1,
- temperature=1.0,
- stop_words=['<|im_end|>'],
- max_new_tokens=1024,
-)
-system_prompt = '你的回答只能从“典”、“孝”、“急”三个字中选一个。'
-agent = Agent(llm, system_prompt)
-
-user_msg = AgentMessage(sender='user', content='今天天气情况')
-bot_msg = agent(user_msg)
-print(bot_msg)
-```
-
-```
-content='急' sender='Agent' formatted=None extra_info=None type=None receiver=None stream_state=
-```
-
-### Memory as State
-
-Both input and output messages will be added to the memory of `Agent` in each forward pass. This is performed in `__call__` rather than `forward`. See the following pseudo code
-
-```python
- def __call__(self, *message):
- message = pre_hooks(message)
- add_memory(message)
- message = self.forward(*message)
- add_memory(message)
- message = post_hooks(message)
- return message
-```
-
-Inspect the memory in two ways
-
-```python
-memory: List[AgentMessage] = agent.memory.get_memory()
-print(memory)
-print('-' * 120)
-dumped_memory: Dict[str, List[dict]] = agent.state_dict()
-print(dumped_memory['memory'])
-```
-
-```
-[AgentMessage(content='今天天气情况', sender='user', formatted=None, extra_info=None, type=None, receiver=None, stream_state=), AgentMessage(content='急', sender='Agent', formatted=None, extra_info=None, type=None, receiver=None, stream_state=)]
-------------------------------------------------------------------------------------------------------------------------
-[{'content': '今天天气情况', 'sender': 'user', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': }, {'content': '急', 'sender': 'Agent', 'formatted': None, 'extra_info': None, 'type': None, 'receiver': None, 'stream_state': }]
-```
-
-Clear the memory of this session (`session_id=0` by default):
-
-```python
-agent.memory.reset()
-```
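-
-To clear only one session while keeping the others, pass its id (a sketch, assuming the default `session_id` keyword mentioned above):
-
-```python
-agent.memory.reset(session_id=1)
-```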
-
-### Custom Message Aggregation
-
-`DefaultAggregator` is called under the hood to assemble and convert `AgentMessage` to OpenAI message format.
-
-```python
- def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]:
- formatted_messages = self.aggregator.aggregate(
- self.memory.get(session_id),
- self.name,
- self.output_format,
- self.template,
- )
- llm_response = self.llm.chat(formatted_messages, **kwargs)
- ...
-```
-
-Implement a simple aggregator that can receive few-shot examples
-
-```python
-from typing import List, Union
-from lagent.memory import Memory
-from lagent.prompts import StrParser
-from lagent.agents.aggregator import DefaultAggregator
-
-class FewshotAggregator(DefaultAggregator):
- def __init__(self, few_shot: List[dict] = None):
- self.few_shot = few_shot or []
-
- def aggregate(self,
- messages: Memory,
- name: str,
- parser: StrParser = None,
- system_instruction: Union[str, dict, List[dict]] = None) -> List[dict]:
- _message = []
- if system_instruction:
- _message.extend(
- self.aggregate_system_intruction(system_instruction))
- _message.extend(self.few_shot)
- messages = messages.get_memory()
- for message in messages:
- if message.sender == name:
- _message.append(
- dict(role='assistant', content=str(message.content)))
- else:
- user_message = message.content
- if len(_message) > 0 and _message[-1]['role'] == 'user':
- _message[-1]['content'] += user_message
- else:
- _message.append(dict(role='user', content=user_message))
- return _message
-
-agent = Agent(
- llm,
- aggregator=FewshotAggregator(
- [
- {"role": "user", "content": "今天天气"},
- {"role": "assistant", "content": "【晴】"},
- ]
- )
-)
-user_msg = AgentMessage(sender='user', content='昨天天气')
-bot_msg = agent(user_msg)
-print(bot_msg)
-```
-
-```
-content='【多云转晴,夜间有轻微降温】' sender='Agent' formatted=None extra_info=None type=None receiver=None stream_state=
-```
-
-### Flexible Response Formatting
-
-In `AgentMessage`, `formatted` is reserved to store information parsed by `output_format` from the model output.
-
-```python
- def forward(self, *message: AgentMessage, session_id=0, **kwargs) -> Union[AgentMessage, str]:
- ...
- llm_response = self.llm.chat(formatted_messages, **kwargs)
- if self.output_format:
- formatted_messages = self.output_format.parse_response(llm_response)
- return AgentMessage(
- sender=self.name,
- content=llm_response,
- formatted=formatted_messages,
- )
- ...
-```
-
-Use a tool parser as follows
-
-```python
-from lagent.prompts.parsers import ToolParser
-
-system_prompt = "逐步分析并编写Python代码解决以下问题。"
-parser = ToolParser(tool_type='code interpreter', begin='```python\n', end='\n```\n')
-llm.gen_params['stop_words'].append('\n```\n')
-agent = Agent(llm, system_prompt, output_format=parser)
-
-user_msg = AgentMessage(
- sender='user',
- content='Marie is thinking of a multiple of 63, while Jay is thinking of a '
- 'factor of 63. They happen to be thinking of the same number. There are '
- 'two possibilities for the number that each of them is thinking of, one '
- 'positive and one negative. Find the product of these two numbers.')
-bot_msg = agent(user_msg)
-print(bot_msg.model_dump_json(indent=4))
-```
-
-```
-{
- "content": "首先,我们需要找出63的所有正因数和负因数。63的正因数可以通过分解63的质因数来找出,即\\(63 = 3^2 \\times 7\\)。因此,63的正因数包括1, 3, 7, 9, 21, 和 63。对于负因数,我们只需将上述正因数乘以-1。\n\n接下来,我们需要找出与63的正因数相乘的结果为63的数,以及与63的负因数相乘的结果为63的数。这可以通过将63除以每个正因数和负因数来实现。\n\n最后,我们将找到的两个数相乘得到最终答案。\n\n下面是Python代码实现:\n\n```python\ndef find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)",
- "sender": "Agent",
- "formatted": {
- "tool_type": "code interpreter",
- "thought": "首先,我们需要找出63的所有正因数和负因数。63的正因数可以通过分解63的质因数来找出,即\\(63 = 3^2 \\times 7\\)。因此,63的正因数包括1, 3, 7, 9, 21, 和 63。对于负因数,我们只需将上述正因数乘以-1。\n\n接下来,我们需要找出与63的正因数相乘的结果为63的数,以及与63的负因数相乘的结果为63的数。这可以通过将63除以每个正因数和负因数来实现。\n\n最后,我们将找到的两个数相乘得到最终答案。\n\n下面是Python代码实现:\n\n",
- "action": "def find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)",
- "status": 1
- },
- "extra_info": null,
- "type": null,
- "receiver": null,
- "stream_state": 0
-}
-```
-
-### Consistency of Tool Calling
-
-`ActionExecutor` uses the same communication data structure as `Agent`, but requires the content of input `AgentMessage` to be a dict containing:
-
-- `name`: tool name, e.g. `'IPythonInterpreter'`, `'WebBrowser.search'`.
-- `parameters`: keyword arguments of the tool API, e.g. `{'command': 'import math;math.sqrt(2)'}`, `{'query': ['recent progress in AI']}`.
-
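-For illustration, such a message can be dispatched to the executor directly (a minimal sketch; without any hooks, the output `content` remains an `ActionReturn`):
-
-```python
-from lagent.actions import ActionExecutor, IPythonInteractive
-from lagent.schema import AgentMessage
-
-executor = ActionExecutor(actions=[IPythonInteractive()])
-msg = AgentMessage(
-    sender='user',
-    content=dict(name='IPythonInteractive', parameters={'command': 'import math;math.sqrt(2)'}),
-)
-result = executor(msg)
-print(result.content)  # an `ActionReturn` carrying the execution status and result
-```
-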
-You can register custom hooks for message conversion.
-
-```python
-from lagent.hooks import Hook
-from lagent.schema import ActionReturn, ActionStatusCode, AgentMessage
-from lagent.actions import ActionExecutor, IPythonInteractive
-
-class CodeProcessor(Hook):
- def before_action(self, executor, message, session_id):
- message = message.copy(deep=True)
- message.content = dict(
- name='IPythonInteractive', parameters={'command': message.formatted['action']}
- )
- return message
-
- def after_action(self, executor, message, session_id):
- action_return = message.content
- if isinstance(action_return, ActionReturn):
- if action_return.state == ActionStatusCode.SUCCESS:
- response = action_return.format_result()
- else:
- response = action_return.errmsg
- else:
- response = action_return
- message.content = response
- return message
-
-executor = ActionExecutor(actions=[IPythonInteractive()], hooks=[CodeProcessor()])
-bot_msg = AgentMessage(
- sender='Agent',
- content='首先,我们需要...',
- formatted={
- 'tool_type': 'code interpreter',
- 'thought': '首先,我们需要...',
- 'action': 'def find_numbers():\n # 正因数\n positive_factors = [1, 3, 7, 9, 21, 63]\n # 负因数\n negative_factors = [-1, -3, -7, -9, -21, -63]\n \n # 找到与正因数相乘的结果为63的数\n positive_numbers = [63 / factor for factor in positive_factors]\n # 找到与负因数相乘的结果为63的数\n negative_numbers = [-63 / factor for factor in negative_factors]\n \n # 计算两个数的乘积\n product = positive_numbers[0] * negative_numbers[0]\n \n return product\n\nresult = find_numbers()\nprint(result)',
- 'status': 1
- })
-executor_msg = executor(bot_msg)
-print(executor_msg)
-```
-
-```
-content='3969.0' sender='ActionExecutor' formatted=None extra_info=None type=None receiver=None stream_state=
-```
-
-**For convenience, Lagent provides `InternLMActionProcessor` which is adapted to messages formatted by `ToolParser` as mentioned above.**
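-
-With it, wiring an executor becomes a one-liner (a minimal sketch mirroring the `DataVisualizer` example at the end of this tutorial):
-
-```python
-from lagent.actions import ActionExecutor, IPythonInterpreter
-from lagent.hooks import InternLMActionProcessor
-
-executor = ActionExecutor([IPythonInterpreter()], hooks=[InternLMActionProcessor()])
-```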
-
-### Dual Interfaces
-
-Lagent adopts a dual-interface design, where almost every component (LLMs, actions, action executors, ...) has a corresponding asynchronous variant, obtained by prefixing its identifier with 'Async'. It is recommended to use synchronous agents for debugging and asynchronous ones for large-scale inference to make the most of idle CPU and GPU resources.
-
-However, ensure the internal consistency of agents: asynchronous agents should be equipped with asynchronous LLMs and asynchronous action executors that drive asynchronous tools.
-
-```python
-from lagent.llms import VllmModel, AsyncVllmModel, LMDeployPipeline, AsyncLMDeployPipeline
-from lagent.actions import ActionExecutor, AsyncActionExecutor, WebBrowser, AsyncWebBrowser
-from lagent.agents import Agent, AsyncAgent, AgentForInternLM, AsyncAgentForInternLM
-```
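-
-For instance, an asynchronous agent can be driven end to end with `asyncio` (a minimal sketch reusing the imports above; the model path is illustrative):
-
-```python
-import asyncio
-
-llm = AsyncLMDeployPipeline(path='internlm/internlm2_5-7b-chat')
-agent = AsyncAgent(llm, 'You are a helpful assistant.')
-reply = asyncio.run(agent(AgentMessage(sender='user', content='Hello'), session_id=0))
-print(reply.content)
-```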
-
----
-
-## Practice
-
-- **Try to implement `forward` instead of `__call__` in subclasses unless necessary.**
-- **Always include the `session_id` argument explicitly; it is designed to isolate memory, LLM requests and tool invocation (e.g. maintaining multiple independent IPython environments) under concurrency, as sketched below.**
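-
-The sketch below reuses the `agent` and `AgentMessage` from the earlier examples to show that per-session memories do not interfere:
-
-```python
-agent(AgentMessage(sender='user', content='Hello from session 0'), session_id=0)
-agent(AgentMessage(sender='user', content='Hello from session 1'), session_id=1)
-print(len(agent.memory.get(0).get_memory()))  # the two messages of session 0 only
-print(len(agent.memory.get(1).get_memory()))  # session 1 is tracked independently
-```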
-
-### Single Agent
-
-Math agents that solve problems by programming
-
-```python
-from lagent.agents.aggregator import InternLMToolAggregator
-
-class Coder(Agent):
- def __init__(self, model_path, system_prompt, max_turn=3):
- super().__init__()
- llm = VllmModel(
- path=model_path,
- meta_template=INTERNLM2_META,
- tp=1,
- top_k=1,
- temperature=1.0,
- stop_words=['\n```\n', '<|im_end|>'],
- max_new_tokens=1024,
- )
- self.agent = Agent(
- llm,
- system_prompt,
- output_format=ToolParser(
- tool_type='code interpreter', begin='```python\n', end='\n```\n'
- ),
- # `InternLMToolAggregator` is adapted to `ToolParser` for aggregating
- # messages with tool invocations and execution results
- aggregator=InternLMToolAggregator(),
- )
- self.executor = ActionExecutor([IPythonInteractive()], hooks=[CodeProcessor()])
- self.max_turn = max_turn
-
- def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
- for _ in range(self.max_turn):
- message = self.agent(message, session_id=session_id)
- if message.formatted['tool_type'] is None:
- return message
- message = self.executor(message, session_id=session_id)
- return message
-
-coder = Coder('Qwen/Qwen2-7B-Instruct', 'Solve the problem step by step with assistance of Python code')
-query = AgentMessage(
- sender='user',
- content='Find the projection of $\\mathbf{a}$ onto $\\mathbf{b} = '
- '\\begin{pmatrix} 1 \\\\ -3 \\end{pmatrix}$ if $\\mathbf{a} \\cdot \\mathbf{b} = 2.$'
-)
-ans = coder(query)
-print(ans.content)
-print('-' * 120)
-for msg in coder.state_dict()['agent.memory']:
- print('*' * 80)
- print(f'{msg["sender"]}:\n\n{msg["content"]}')
-```
-
-### Multiple Agents
-
-Asynchronous blogging agents that improve writing quality by self-refinement ([original AutoGen example](https://microsoft.github.io/autogen/0.2/docs/topics/prompting-and-reasoning/reflection/))
-
-```python
-import asyncio
-import os
-from lagent.llms import AsyncGPTAPI
-from lagent.agents import AsyncAgent
-os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY'
-
-class PrefixedMessageHook(Hook):
- def __init__(self, prefix: str, senders: list = None):
- self.prefix = prefix
- self.senders = senders or []
-
- def before_agent(self, agent, messages, session_id):
- for i, message in enumerate(messages):
- if message.sender in self.senders:
- message = message.copy(deep=True)
- message.content = self.prefix + message.content
- messages[i] = message
- return messages
-
-class AsyncBlogger(AsyncAgent):
- def __init__(self, model_path, writer_prompt, critic_prompt, critic_prefix='', max_turn=3):
- super().__init__()
- llm = AsyncGPTAPI(model_type=model_path, retry=5, max_new_tokens=2048)
- self.writer = AsyncAgent(llm, writer_prompt, name='writer')
- self.critic = AsyncAgent(
- llm, critic_prompt, name='critic', hooks=[PrefixedMessageHook(critic_prefix, ['writer'])]
- )
- self.max_turn = max_turn
-
- async def forward(self, message: AgentMessage, session_id=0) -> AgentMessage:
- for _ in range(self.max_turn):
- message = await self.writer(message, session_id=session_id)
- message = await self.critic(message, session_id=session_id)
- return await self.writer(message, session_id=session_id)
-
-blogger = AsyncBlogger(
- 'gpt-4o-2024-05-13',
- writer_prompt="You are a writing assistant tasked to write an engaging blogpost. You try to generate the best blogpost possible for the user's request. "
- "If the user provides critique, then respond with a revised version of your previous attempts.",
- critic_prompt="Generate critique and recommendations on the writing. Provide detailed recommendations, including requests for length, depth, style, etc.",
- critic_prefix='Reflect and provide critique on the following writing. \n\n',
-)
-user_prompt = (
- "Write an engaging blogpost on the recent updates in {topic}. "
- "The blogpost should be engaging and understandable for general audience. "
- "Should have more than 3 paragraphes but no longer than 1000 words.")
-bot_msgs = asyncio.get_event_loop().run_until_complete(
- asyncio.gather(
- *[
- blogger(AgentMessage(sender='user', content=user_prompt.format(topic=topic)), session_id=i)
- for i, topic in enumerate(['AI', 'Biotechnology', 'New Energy', 'Video Games', 'Pop Music'])
- ]
- )
-)
-print(bot_msgs[0].content)
-print('-' * 120)
-for msg in blogger.state_dict(session_id=0)['writer.memory']:
- print('*' * 80)
- print(f'{msg["sender"]}:\n\n{msg["content"]}')
-print('-' * 120)
-for msg in blogger.state_dict(session_id=0)['critic.memory']:
- print('*' * 80)
- print(f'{msg["sender"]}:\n\n{msg["content"]}')
-```
-
-A multi-agent workflow that performs information retrieval, data collection and chart plotting ([original LangGraph example](https://vijaykumarkartha.medium.com/multiple-ai-agents-creating-multi-agent-workflows-using-langgraph-and-langchain-0587406ec4e6))
-
-```python
-import json
-from lagent.actions import IPythonInterpreter, WebBrowser, ActionExecutor
-from lagent.agents.stream import get_plugin_prompt
-from lagent.llms import GPTAPI
-from lagent.hooks import InternLMActionProcessor
-
-TOOL_TEMPLATE = (
- "You are a helpful AI assistant, collaborating with other assistants. Use the provided tools to progress"
- " towards answering the question. If you are unable to fully answer, that's OK, another assistant with"
- " different tools will help where you left off. Execute what you can to make progress. If you or any of"
- " the other assistants have the final answer or deliverable, prefix your response with {finish_pattern}"
- " so the team knows to stop. You have access to the following tools:\n{tool_description}\nPlease provide"
- " your thought process when you need to use a tool, followed by the call statement in this format:"
- "\n{invocation_format}\\\\n**{system_prompt}**"
-)
-
-class DataVisualizer(Agent):
- def __init__(self, model_path, research_prompt, chart_prompt, finish_pattern="Final Answer", max_turn=10):
- super().__init__()
- llm = GPTAPI(model_path, key='YOUR_OPENAI_API_KEY', retry=5, max_new_tokens=1024, stop_words=["```\n"])
- interpreter, browser = IPythonInterpreter(), WebBrowser("BingSearch", api_key="YOUR_BING_API_KEY")
- self.researcher = Agent(
- llm,
- TOOL_TEMPLATE.format(
- finish_pattern=finish_pattern,
- tool_description=get_plugin_prompt(browser),
- invocation_format='```json\n{"name": {{tool name}}, "parameters": {{keyword arguments}}}\n```\n',
- system_prompt=research_prompt,
- ),
- output_format=ToolParser(
- "browser",
- begin="```json\n",
- end="\n```\n",
- validate=lambda x: json.loads(x.rstrip('`')),
- ),
- aggregator=InternLMToolAggregator(),
- name="researcher",
- )
- self.charter = Agent(
- llm,
- TOOL_TEMPLATE.format(
- finish_pattern=finish_pattern,
- tool_description=interpreter.name,
- invocation_format='```python\n{{code}}\n```\n',
- system_prompt=chart_prompt,
- ),
- output_format=ToolParser(
- "interpreter",
- begin="```python\n",
- end="\n```\n",
- validate=lambda x: x.rstrip('`'),
- ),
- aggregator=InternLMToolAggregator(),
- name="charter",
- )
- self.executor = ActionExecutor([interpreter, browser], hooks=[InternLMActionProcessor()])
- self.finish_pattern = finish_pattern
- self.max_turn = max_turn
-
- def forward(self, message, session_id=0):
- for _ in range(self.max_turn):
- message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"]) # override llm stop words
- while message.formatted["tool_type"]:
- message = self.executor(message, session_id=session_id)
- message = self.researcher(message, session_id=session_id, stop_words=["```\n", "```python"])
- if self.finish_pattern in message.content:
- return message
- message = self.charter(message, session_id=session_id)
- while message.formatted["tool_type"]:
- message = self.executor(message, session_id=session_id)
- message = self.charter(message, session_id=session_id)
- if self.finish_pattern in message.content:
- return message
- return message
-
-visualizer = DataVisualizer(
- "gpt-4o-2024-05-13",
- research_prompt="You should provide accurate data for the chart generator to use.",
- chart_prompt="Any charts you display will be visible by the user.",
-)
-user_msg = AgentMessage(
- sender='user',
- content="Fetch the China's GDP over the past 5 years, then draw a line graph of it. Once you code it up, finish.")
-bot_msg = visualizer(user_msg)
-print(bot_msg.content)
-json.dump(visualizer.state_dict(), open('visualizer.json', 'w'), ensure_ascii=False, indent=4)
-```