perf: adjust function call position
adolphnov committed Aug 4, 2024
1 parent a5e987f commit ad91729
Showing 16 changed files with 346 additions and 393 deletions.
2 changes: 1 addition & 1 deletion adapter/docker/docker-compose.yml
@@ -8,7 +8,7 @@ services:
     ports:
       - "8787:8787"
     volumes:
-      - ./config.toml:/app/config.toml
+      - ./config/config.toml:/app/config.toml
     environment:
       - REDIS_URL=redis://redis:6379
       # When left empty, the host of the incoming request is used as DOMAIN
6 changes: 1 addition & 5 deletions adapter/vercel/utils/duckduckgo.js
@@ -254,9 +254,5 @@ export const duckduckgo_search = {
         console.log(content);
         return { content, time };
     },
-    settings: {
-        after_prompt: "As an intelligent assistant, please follow the steps below to efficiently analyze and extract the search results I provide, and answer my question in a clear and concise way:\n\n1. Read and evaluate: Read all search results carefully, identifying and prioritizing information from reliable and up-to-date sources. Consider factors such as official sources, well-known institutions, and how recently the information was updated.\n\n2. Extract key information:\n • *Exchange-rate queries*: Provide the latest exchange rate and perform any necessary conversion.\n • *Weather queries*: Provide the weather forecast for the specific place and time.\n • *Factual questions*: Find an authoritative answer.\n\n3. Answer concisely: Analyze the extracted information holistically and give a brief, to-the-point answer.\n\n4. Identify uncertainty: If the information is contradictory or uncertain, explain the possible reasons.\n\n5. Flag missing information: If the search results cannot fully answer the question, state what additional information is needed.\n\n6. Be user-friendly: Use plain, easy-to-understand language, adding short explanations where necessary to keep the answer easy to follow.\n\n7. Supplementary information: Provide extra relevant information or suggestions as needed to increase the value of the answer.\n\n8. Cite sources: Clearly mark the source of the information in the answer, including the website or institution name and when the data was published or updated.\n\n9. Reference list: If multiple sources are cited, add a short reference list at the end of the answer naming the main sources.\n\nMake sure the goal is to provide the most current, relevant, and useful information as a direct response to my question. Avoid verbose detail, focus on the core answer I care about most, and use reliable sources to strengthen the answer's credibility.",
-        after_render: (question, result) => `Question: ${question}\n\nSearch results: ${result}`,
-    },
+    // after_history_length: 0,
     type: 'search'
 };
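Note on the removed settings block: the after_prompt / after_render hooks were consumed by the function-call pipeline that this commit deletes from src/agent/llm.js further down. A minimal sketch of how those hooks were applied there, using the names from the deleted llm.js code:

    // After a tool (e.g. duckduckgo_search) returned `last_func_result`:
    // rebuild the user text via the tool's template, else plain concatenation,
    text = ENV.TOOLS[function_name].settings?.after_render?.(text, last_func_result.content)
        || text + '\n' + last_func_result.content;
    // and swap in the tool-specific system prompt for the follow-up completion
    // (bp_config is the backup copy of USER_CONFIG taken before the tool run).
    context.USER_CONFIG.SYSTEM_INIT_MESSAGE =
        ENV.TOOLS[function_name].settings?.after_prompt || bp_config.SYSTEM_INIT_MESSAGE;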
2 changes: 1 addition & 1 deletion dist/buildinfo.json
@@ -1 +1 @@
-{"sha": "0c3e541", "timestamp": 1722687419}
+{"sha": "a5e987f", "timestamp": 1722787231}
251 changes: 156 additions & 95 deletions dist/index.js

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion dist/timestamp
@@ -1 +1 @@
-1722687419
+1722787231
141 changes: 10 additions & 131 deletions src/agent/llm.js
@@ -7,7 +7,6 @@ import {
 import {DATABASE, ENV} from '../config/env.js';
 import { loadAudioLLM, loadChatLLM } from "./agents.js";
 import { handleFile } from "../config/middle.js";
-import { requestCompletionsFromOpenAI } from "../agent/openai.js";

 /**
  * @return {(function(string): number)}
@@ -112,11 +111,16 @@ async function requestCompletionsFromLLM(text, prompt, context, llm, modifier, onStream) {
         history = modifierData.history;
         text = modifierData.text;
     }
-    const answer = await llm(text, prompt, history, context, onStream);
+    let answer = await llm(text, prompt, history, context, onStream);
     if (context._info.lastStepHasFile) {
         text = '[A FILE] ' + text;
     }
-    if (!historyDisable && answer && typeof answer === 'string') {
+    if (typeof answer === 'object') {
+        text = answer.q;
+        answer = answer.a;
+    }
+
+    if (!historyDisable && answer) {
         history.push({ role: 'user', content: text || '' });
         history.push({ role: 'assistant', content: answer });
         await DATABASE.put(historyKey, JSON.stringify(history)).catch(console.error);
@@ -193,18 +197,13 @@ export async function chatWithLLM(text, context, modifier, pointerLLM = loadChatLLM) {
     const prompt = context.USER_CONFIG.SYSTEM_INIT_MESSAGE;
     console.log(`[START] Chat via ${llm.name}`);

-    if (text && ENV.TOOLS && ENV.USE_TOOLS?.length > 0) {
-        const result = await handleOpenaiFunctionCall(text, context);
-        if (result && result instanceof Response) {
-            return result;
-        }
-        text = result;
-    }
-
     const answer = await requestCompletionsFromLLM(text, prompt, context, llm, modifier, onStream);
     if (!answer) {
         return sendMessageToTelegramWithContext(context)('None response');
     }
+    if (answer instanceof Response) {
+        return answer;
+    }
     context.CURRENT_CHAT_CONTEXT.parse_mode = parseMode;
     if (ENV.SHOW_REPLY_BUTTON && context.CURRENT_CHAT_CONTEXT.message_id) {
         try {
@@ -246,126 +245,6 @@ export async function chatWithLLM(text, context, modifier, pointerLLM = loadChatLLM) {
     }
 }

-/**
- * Handle tool calls
- *
- * @param {TelegramMessage} message
- * @param {Context} context
- * @return {Promise<Response>}
- */
-async function handleOpenaiFunctionCall(text, context) {
-    try {
-        const filter_tools = ENV.USE_TOOLS.filter((i) => Object.keys(ENV.TOOLS).includes(i)).map((t) => ENV.TOOLS[t]);
-        if (filter_tools.length > 0) {
-            let tools = filter_tools.map((tool) => {
-                return {
-                    'type': 'function',
-                    'function': tool.schema,
-                };
-            });
-
-            // Default prompt and prefix to use
-            let prompt = ENV.PROMPT['tools_prompt'];
-            // Back up the current model and extra config
-            const bp_config = { ...context.USER_CONFIG };
-            // const bp_prompt = context.USER_CONFIG.SYSTEM_INIT_MESSAGE;
-            const bp_extra_params = { ...context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS };
-
-            context.USER_CONFIG.OPENAI_CHAT_MODEL = context.USER_CONFIG.FUNCTION_CALL_MODEL || 'gpt-4o';
-            if (context.USER_CONFIG.FUNCTION_CALL_BASE && context.USER_CONFIG.FUNCTION_CALL_API_KEY) {
-                context.USER_CONFIG.OPENAI_API_BASE = context.USER_CONFIG.FUNCTION_CALL_BASE;
-                context.USER_CONFIG.OPENAI_API_KEY = [context.USER_CONFIG.FUNCTION_CALL_API_KEY];
-            }
-            context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS.tools = tools;
-            context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS.tool_choice = 'auto';
-
-            const llm = requestCompletionsFromOpenAI;
-            const first_step_result = await requestCompletionsFromLLM(text, prompt, context, llm, null, null);
-            if (first_step_result?.content?.startsWith?.('NEED_MORE_INFO:')) {
-                return sendMessageToTelegramWithContext(context)(first_step_result.content.substring('NEED_MORE_INFO:'.length));
-            }
-
-            // Assume the functions run serially, with each step's output feeding the next step's input
-            first_step_result.tool_calls = first_step_result?.tool_calls?.filter((i) =>
-                Object.keys(ENV.TOOLS).includes(i.function.name),
-            );
-            if (
-                !first_step_result.tool_calls ||
-                first_step_result.tool_calls?.length === 0 ||
-                first_step_result.content?.startsWith?.('NO_CALL_NEEDED')
-            ) {
-                console.log('No need call function.');
-                // return sendMessageToTelegramWithContext(context)(`Cant found function: ${function_name}`);
-            } else {
-                // let last_llm_result = first_step_result;
-                // let last_func_result = null;
-                const options = {};
-                const exposure_vars = ['JINA_API_KEY'];
-                exposure_vars.forEach((i) => (options[i] = context.USER_CONFIG[i]));
-                // let function_name = null; // last_llm_result.tool_calls[0].function.name;
-                // let function_args = null; // JSON.parse(last_llm_result.tool_calls[0].function.arguments);
-                // for (const [i, func] of first_step_result.tool_calls.entries()) {
-
-                // if (i > 0) {
-                //     prompt = ENV.TOOLS[func.function.name].settings.before_prompt;
-                //     message = ENV.TOOLS[func.function.name].settings.before_render(text);
-                //     // Keep only the function that needs to run in this step
-                //     context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS.tools = [
-                //         {
-                //             'type': 'function',
-                //             'function': ENV.TOOLS[func.function.name].schema,
-                //         },
-                //     ];
-                //     last_llm_result = await requestCompletionsFromOpenAI(message, prompt, null, ENV, null, true);
-                // }
-                // function_name = func.function.name;
-                // function_args = JSON.parse(last_llm_result.tool_calls[0].function.arguments);
-                // context._info.setCallInfo(last_llm_result.tool_calls[0].function.arguments);
-                // console.log("start use function: ", function_name);
-                // last_func_result = await ENV.TOOLS[function_name].func(function_args, options);
-                // if (last_func_result instanceof Response) {
-                //     return last_func_result;
-                // }
-                // if (!last_func_result.content) {
-                //     return sendMessageToTelegramWithContext(context)(`None response of ${function_name}`);
-                // }
-                // // console.log(last_func_result.content);
-                // text = last_func_result.content;
-                // }
-
-                const func = first_step_result.tool_calls[0].function;
-                const function_name = func.name;
-                const function_args = JSON.parse(func.arguments);
-                console.log('start use function: ', function_name);
-                const last_func_result = await ENV.TOOLS[function_name].func(function_args, options);
-                context._info.setCallInfo(`${function_name} ` + `${last_func_result?.time || ''}` + `:\n${Object.values(function_args)}`);
-
-                if (!last_func_result?.content?.trim()) {
-                    return sendMessageToTelegramWithContext(context)(`None response of ${function_name}`);
-                }
-
-                // console.log(last_func_result.content);
-                text =
-                    ENV.TOOLS[function_name].settings?.after_render?.(text, last_func_result.content) ||
-                    text + '\n' + last_func_result.content;
-                context.USER_CONFIG.SYSTEM_INIT_MESSAGE =
-                    ENV.TOOLS[function_name].settings?.after_prompt || bp_config.SYSTEM_INIT_MESSAGE;
-            }
-
-            context.USER_CONFIG.OPENAI_CHAT_MODEL = bp_config.OPENAI_CHAT_MODEL;
-            context.USER_CONFIG.OPENAI_API_BASE = bp_config.OPENAI_API_BASE;
-            context.USER_CONFIG.OPENAI_API_KEY = bp_config.OPENAI_API_KEY;
-            context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS = bp_extra_params;
-            //
-            delete context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS.tools;
-            delete context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS.tool_choice;
-        }
-        return text;
-    } catch (e) {
-        return sendMessageToTelegramWithContext(context)(e.message);
-    }
-}

 export async function chatViaFileWithLLM(context) {
     try {
         if (!context.CURRENT_CHAT_CONTEXT.message_id) {
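With the function-call step moved out of llm.js, an agent's llm(...) call can now resolve to an object rather than a plain string, and requestCompletionsFromLLM unpacks it before writing history (see the @@ -112,11 +111,16 @@ hunk above). A condensed sketch of the shapes the new code accepts, inferred from this diff:

    // Possible results of `await llm(text, prompt, history, context, onStream)`:
    //   'answer'   -> stored against the original user text
    //   { q, a }   -> q replaces the stored user text (e.g. the question with
    //                 tool output merged in), a is the assistant answer
    if (typeof answer === 'object') {
        text = answer.q;
        answer = answer.a;
    }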
27 changes: 14 additions & 13 deletions src/agent/openai.js
@@ -1,7 +1,7 @@
 import "../types/context.js";
 import {requestChatCompletions} from "./request.js";
 import {ENV} from "../config/env.js";
-
+import { handleOpenaiFunctionCall } from "../agent/toolHander.js";

 /**
  * @param {ContextType} context
@@ -40,12 +40,11 @@ export async function requestCompletionsFromOpenAI(message, prompt, history, context, onStream) {
         ? context.USER_CONFIG.OPENAI_VISION_MODEL
         : context.USER_CONFIG.OPENAI_CHAT_MODEL;
     const extra_params = context.USER_CONFIG.OPENAI_API_EXTRA_PARAMS;
-    const messages = [...(history || [])];
+    const messages = [...(history || []), { role: 'user', content: message }];

     if (prompt) {
-        messages.push({ role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
+        messages.unshift({ role: context.USER_CONFIG.SYSTEM_INIT_MESSAGE_ROLE, content: prompt });
     }
-    messages.push({ role: 'user', content: message });
     // Prefer the original file for Claude compatibility

     if (context._info?.lastStepHasFile) {
@@ -70,22 +69,24 @@
         stream: onStream != null,
         ...(!!onStream && ENV.ENABLE_SHOWTOKENINFO && { stream_options: { include_usage: true } }),
     };
-    if (prompt.includes('json') || prompt.includes('JSON')) {
-        body.response_format = {
-            'type': 'json_object',
-        };
-    }

     const header = {
         'Content-Type': 'application/json',
         'Authorization': `Bearer ${API_KEY}`,
     };
     const options = {};

-    if (extra_params.tools && extra_params.tools.length > 0) {
-        options.fullContentExtractor = (d) => {
-            return d.choices?.[0]?.message;
-        };
+    if (message && !context._info?.lastStepHasFile && ENV.TOOLS && ENV.USE_TOOLS?.length > 0) {
+        const result = await handleOpenaiFunctionCall(url, header, body, context);
+        if (result.type === 'stop') {
+            return result.message;
+        } else if (result.type === 'error') {
+            throw new Error(result.message);
+        }
+        const resp_obj = { q: body.messages.at(-1).content }; // use the corrected question content
+        resp_obj.a = await requestChatCompletions(url, header, body, context, onStream, null, options);
+        return resp_obj;
+
     }

     return requestChatCompletions(url, header, body, context, onStream, null, options);
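handleOpenaiFunctionCall itself now lives in src/agent/toolHander.js, which this view does not expand. From the call site above, its result contract appears to be as sketched below; the fall-through case is inferred, not confirmed by this diff:

    // Inferred contract of `await handleOpenaiFunctionCall(url, header, body, context)`:
    const result = await handleOpenaiFunctionCall(url, header, body, context);
    if (result.type === 'stop') {
        return result.message;            // tool flow already produced the final message
    } else if (result.type === 'error') {
        throw new Error(result.message);  // surfaced to the caller as an Error
    }
    // otherwise: body.messages presumably now carries the tool output,
    // so the normal completion request proceeds with the enriched body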
7 changes: 4 additions & 3 deletions src/agent/request.js
@@ -1,7 +1,6 @@
 import '../types/context.js';
 import { ENV } from '../config/env.js';
 import { Stream } from './stream.js';
-import { fetchWithRetry } from '../utils/utils.js';

 /**
  *
@@ -107,8 +106,10 @@ export async function requestChatCompletions(url, header, body, context, onStream
     if (ENV.DEBUG_MODE) {
         console.log(`url:\n${url}\nheader:\n${JSON.stringify(header)}\nbody:\n${JSON.stringify(body, null, 2)}`);
     }
+    // Exclude time spent on function calls
+    context._info.updateStartTime();

-    const resp = await fetchWithRetry(url, {
+    const resp = await fetch(url, {
         method: 'POST',
         headers: header,
         body: JSON.stringify(body),
@@ -126,7 +127,7 @@ export async function requestChatCompletions(url, header, body, context, onStream
         const stream = options.streamBuilder(resp, controller);
         let contentFull = '';
         let lengthDelta = 0;
-        let updateStep = 10;
+        let updateStep = 20;
         let msgPromise = null;
         let lastChunk = null;
         let usage = null;
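The updateStep change halves how often the streamed draft is edited: a partial message is flushed only once lengthDelta has grown past updateStep characters. A rough sketch of the throttle, assuming the variable names from the hunk above (the flush call and any adaptive growth of updateStep are assumptions, not shown in this diff):

    for await (const chunk of stream) {       // `stream` built by options.streamBuilder
        contentFull += chunk;
        lengthDelta += chunk.length;
        if (lengthDelta > updateStep) {       // was 10, now 20: fewer Telegram edits
            lengthDelta = 0;
            msgPromise = onStream(`${contentFull}\n...`);  // push the partial draft
        }
    }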
(Diffs for the remaining changed files were not loaded in this view.)