Skip to content

Commit

Permalink
chore: optimize prompt information and add additional parameters when calling the tool.
Browse files Browse the repository at this point in the history
  • Loading branch information
adolphnov committed Aug 7, 2024
1 parent 622fd85 commit 44c5d89
Show file tree
Hide file tree
Showing 11 changed files with 154 additions and 134 deletions.
2 changes: 1 addition & 1 deletion dist/buildinfo.json
Original file line number Diff line number Diff line change
@@ -1 +1 @@
{"sha": "c406507", "timestamp": 1722945405}
{"sha": "622fd85", "timestamp": 1723037430}
182 changes: 91 additions & 91 deletions dist/index.js

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion dist/timestamp
Original file line number Diff line number Diff line change
@@ -1 +1 @@
1722945405
1723037430
15 changes: 8 additions & 7 deletions src/agent/llm.js
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ export async function chatWithLLM(text, context, modifier, pointerLLM = loadChat
let nextEnableTime = null;
const sendHandler = (() => {
const question = text;
const telegraph_prefix = `Question\n> ${question.substring(0, 200)}\n---\n#Answer\n🤖 __${context._info.model}__\n`;
const telegraph_prefix = `#Question\n\`\`\`\n${question.length > 100 ? question.slice(0, 50) + '...' + question.slice(-50) : question}\n\`\`\`\n---\n#Answer\n🤖 __${context._info.model}:__\n`;
let first_time_than = true;
const author = {
short_name: context.SHARE_CONTEXT.currentBotName,
Expand All @@ -177,18 +177,19 @@ export async function chatWithLLM(text, context, modifier, pointerLLM = loadChat
text.length > ENV.TELEGRAPH_NUM_LIMIT &&
ENV.ENABLE_TELEGRAPH && CONST.GROUP_TYPES.includes(context.SHARE_CONTEXT.chatType)
) {
let telegraph_suffix = `\n---\n\n\`\`\`\ndebug info:\n${context._info.message_title}\n\`\`\``;
let telegraph_suffix = `\n---\n\`\`\`\ndebug info:\n\n${ENV.CALL_INFO ? '' : context._info.call_info.replace('$$f_t$$', '') + '\n'}${context._info.message_title}\n\`\`\``;
if (first_time_than) {
const resp = await sendTelegraphWithContext(context)(
null,
telegraph_prefix + text + telegraph_suffix,
author,
);
const url = `https://telegra.ph/${context.SHARE_CONTEXT.telegraphPath}`;
const suffix_msg = ` ...\n\n[点击查看更多~~](${url})`;
await sendMessageToTelegramWithContext(context)(
text.substring(0, ENV.TELEGRAPH_NUM_LIMIT) + suffix_msg
);
const msg = `回答已经转换成完整文章~\n[🔗**点击查看**](${url})`;
const show_info_tag = ENV.ENABLE_SHOWINFO;
ENV.ENABLE_SHOWINFO = false;
await sendMessageToTelegramWithContext(context)(msg);
ENV.ENABLE_SHOWINFO = show_info_tag;
first_time_than = false;
return resp;
}
Expand Down Expand Up @@ -323,7 +324,7 @@ export async function chatViaFileWithLLM(context) {
}
return null;
} catch (e) {
// context.CURRENT_CHAT_CONTEXT.disable_web_page_preview = true;
context.CURRENT_CHAT_CONTEXT.disable_web_page_preview = true;
return sendMessageToTelegramWithContext(context)(e.substring(2048));
}
}
Expand Down
2 changes: 2 additions & 0 deletions src/agent/request.js
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
import '../types/context.js';
import { ENV } from '../config/env.js';
import { Stream } from './stream.js';
import { sendMessageToTelegramWithContext } from "../telegram/telegram.js";

/**
*
Expand Down Expand Up @@ -121,6 +122,7 @@ export async function requestChatCompletions(url, header, body, context, onStrea
// 排除 function call耗时
context._info.updateStartTime();
console.log('chat start.');
setTimeout(() => sendMessageToTelegramWithContext(context)(`\`chat with llm.\``), 0);

const resp = await fetch(url, {
method: 'POST',
Expand Down
28 changes: 19 additions & 9 deletions src/agent/toolHander.js
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ export async function handleOpenaiFunctionCall(url, header, body, context) {
});

//默认使用的提示词与前缀
let prompt = ENV.PROMPT['tools_prompt'];
let prompt = tools_settings.default.prompt;
let call_url = url;
if (context.USER_CONFIG.FUNCTION_CALL_BASE) {
call_url = context.USER_CONFIG.FUNCTION_CALL_BASE + '/chat/completions';
Expand All @@ -40,6 +40,7 @@ export async function handleOpenaiFunctionCall(url, header, body, context) {
model: context.USER_CONFIG.FUNCTION_CALL_MODEL,
tools,
tool_choice: 'auto',
...tools_settings.default.extra_params,
messages: body.messages,
stream: false,
};
Expand All @@ -59,14 +60,16 @@ export async function handleOpenaiFunctionCall(url, header, body, context) {
const exposure_vars = ['JINA_API_KEY'];
exposure_vars.forEach((i) => (opt[i] = context.USER_CONFIG[i]));
const original_question = body.messages.at(-1).content;
let final_prompt = context.USER_CONFIG.SYSTEM_INIT_MESSAGE;
// let final_prompt = context.USER_CONFIG.SYSTEM_INIT_MESSAGE;
const stopLoopType = 'web_crawler';
const INFO_LENGTH_LIMIT = 80;
let final_tool_type = null;

while (call_times > 0 && call_body.tools.length > 0){
const start_time = new Date();
// setTimeout(() => sendMessageToTelegramWithContext(context)(`\`ask llm about func call.\``), 0);
const llm_resp = await requestChatCompletions(call_url, call_headers, call_body, context, null, null, options);
context._info.setCallInfo(((new Date() - start_time) / 1000).toFixed(1) + 's', 'c_t');
sendMessageToTelegramWithContext(context)('...');
llm_resp.tool_calls =
llm_resp?.tool_calls?.filter((i) => Object.keys(ENV.TOOLS).includes(i.function.name)) || [];
if (llm_resp.content?.startsWith('```json\n')) {
Expand Down Expand Up @@ -99,13 +102,14 @@ export async function handleOpenaiFunctionCall(url, header, body, context) {
).then((results) => results.filter((result) => result !== 'Timeout'));
};
let exec_times = ENV.CON_EXEC_FUN_NUM;
setTimeout(() => sendMessageToTelegramWithContext(context)(`\`call ${llm_resp.tool_calls[0].function.name}\``), 0);
for (const func of llm_resp.tool_calls) {
if (exec_times <= 0) break;
const name = func.function.name;
call_body.tools = call_body.tools.filter(t => t.function.name !== name);
const args = JSON.parse(func.function.arguments);
let args_i = Object.values(args).join();
if (args_i.length > 80) args_i = args_i.substring(0, 80) + '...';
if (args_i.length > INFO_LENGTH_LIMIT) args_i = args_i.substring(0, INFO_LENGTH_LIMIT) + '...';
context._info.setCallInfo(`${name}:${args_i}`, 'f_i');
console.log('start use function: ', name);
funcPromise.push(ENV.TOOLS[name].func(args, opt, signal));
Expand All @@ -129,18 +133,24 @@ export async function handleOpenaiFunctionCall(url, header, body, context) {
throw new Error('None response in func call.');

}

// call_messages.pop();
const tool_type = ENV.TOOLS[llm_resp.tool_calls[0].function.name].type;
const render = tools_settings[tool_type].render;
final_tool_type = ENV.TOOLS[llm_resp.tool_calls[0].function.name].type;
const render = tools_settings[final_tool_type].render;
call_messages.push({
role: 'user',
content: render?.(original_question, content_text) || original_question + '\n\n' + content_text,
});
if (tools_settings[tool_type].prompt) final_prompt = tools_settings[tool_type].prompt;
if (tool_type === stopLoopType) break;
// if (tools_settings[tool_type].prompt) final_prompt = tools_settings[tool_type].prompt;
if (final_tool_type === stopLoopType) break;
call_times--;
}
body.messages[0].content = final_prompt;
if (final_tool_type) {
body.messages[0].content = tools_settings[final_tool_type].prompt;
for (const [key, value] of Object.entries(tools_settings[final_tool_type].extra_params)) {
body[key] = value;
}
}
}
return { type: 'continue' };
} catch (e) {
Expand Down
2 changes: 1 addition & 1 deletion src/config/context.js
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ class CurrentChatContext {
message_id = null;
reply_markup = null;
allow_sending_without_reply = null;
disable_web_page_preview = true;
disable_web_page_preview = false;
}

/**
Expand Down
7 changes: 3 additions & 4 deletions src/config/env.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import '../types/i18n.js';
import '../types/context.js';
import PROMPT from "../prompt/prompt.js";;
import prompts from "../prompt/prompt.js";;

/**
* @class
Expand Down Expand Up @@ -269,9 +269,9 @@ class Environment {
// 提示词 修改SYSTEM_INIT_MESSAGE时使用 使用 /set 指令快速切换
// 可配合CHAT_MESSAGE_TRIGGER: 'role:':'/setenv SYSTEM_INIT_MESSAGE=~role'
// 快速修改变量:'model:':'/setenv OPENAI_CHAT_MODEL=' 'pro:':'/setenv AI_PROVIDER='
PROMPT = PROMPT;
PROMPT = prompts;
// /set 指令映射变量 | 分隔多个关系,:分隔映射
MAPPING_KEY = '-p:SYSTEM_INIT_MESSAGE|-n:MAX_HISTORY_LENGTH|-a:AI_PROVIDER|-ai:AI_IMAGE_PROVIDER|-m:CHAT_MODEL|-v:OPENAI_VISION_MODEL|-t :OPENAI_TTS_MODEL';
MAPPING_KEY = '-p:SYSTEM_INIT_MESSAGE|-n:MAX_HISTORY_LENGTH|-a:AI_PROVIDER|-ai:AI_IMAGE_PROVIDER|-m:CHAT_MODEL|-v:OPENAI_VISION_MODEL|-t :OPENAI_TTS_MODEL|-ex:OPENAI_API_EXTRA_PARAMS';
// /set 指令映射值 | 分隔多个关系,:分隔映射
MAPPING_VALUE = "";
// MAPPING_VALUE = "c35son:claude-3-5-sonnet-20240620|haiku:claude-3-haiku-20240307|g4m:gpt-4o-mini|g4:gpt-4o|rp+:command-r-plus";
Expand Down Expand Up @@ -393,7 +393,6 @@ export function initEnv(env, i18n) {
// 全局对象
DATABASE = env.DATABASE;
API_GUARD = env.API_GUARD;

// 绑定自定义命令
const customCommandPrefix = 'CUSTOM_COMMAND_';
const customCommandDescriptionPrefix = 'COMMAND_DESCRIPTION_';
Expand Down
2 changes: 1 addition & 1 deletion src/prompt/prompt.js

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading

0 comments on commit 44c5d89

Please sign in to comment.