diff --git a/agent/src/index.ts b/agent/src/index.ts
index ad35595d6f0..bb631a81363 100644
--- a/agent/src/index.ts
+++ b/agent/src/index.ts
@@ -60,6 +60,12 @@ export const wait = (minTime: number = 1000, maxTime: number = 3000) => {
     return new Promise((resolve) => setTimeout(resolve, waitTime));
 };
 
+const logFetch = async (url: string, options: any) => {
+    elizaLogger.info(`Fetching ${url}`);
+    elizaLogger.info(options);
+    return fetch(url, options);
+};
+
 export function parseArguments(): {
     character?: string;
     characters?: string;
@@ -473,6 +479,7 @@ export async function createAgent(
         services: [],
         managers: [],
         cacheManager: cache,
+        fetch: logFetch,
     });
 }
 
diff --git a/packages/core/src/generation.ts b/packages/core/src/generation.ts
index 12ef211a653..c4c54643d07 100644
--- a/packages/core/src/generation.ts
+++ b/packages/core/src/generation.ts
@@ -78,47 +78,68 @@ export async function generateText({
 
     // allow character.json settings => secrets to override models
    // FIXME: add MODEL_MEDIUM support
-    switch(provider) {
+    switch (provider) {
         // if runtime.getSetting("LLAMACLOUD_MODEL_LARGE") is true and modelProvider is LLAMACLOUD, then use the large model
-        case ModelProviderName.LLAMACLOUD: {
-            switch(modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("LLAMACLOUD_MODEL_LARGE") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("LLAMACLOUD_MODEL_SMALL") || model;
-                }
-                break;
+        case ModelProviderName.LLAMACLOUD:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("LLAMACLOUD_MODEL_LARGE") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("LLAMACLOUD_MODEL_SMALL") ||
+                                model;
+                        }
+                        break;
+                }
             }
-        }
-        break;
-        case ModelProviderName.TOGETHER: {
-            switch(modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("TOGETHER_MODEL_LARGE") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("TOGETHER_MODEL_SMALL") || model;
-                }
-                break;
+            break;
+        case ModelProviderName.TOGETHER:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("TOGETHER_MODEL_LARGE") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("TOGETHER_MODEL_SMALL") ||
+                                model;
+                        }
+                        break;
+                }
             }
-        }
-        break;
-        case ModelProviderName.OPENROUTER: {
-            switch(modelClass) {
-                case ModelClass.LARGE: {
-                    model = runtime.getSetting("LARGE_OPENROUTER_MODEL") || model;
-                }
-                break;
-                case ModelClass.SMALL: {
-                    model = runtime.getSetting("SMALL_OPENROUTER_MODEL") || model;
-                }
-                break;
+            break;
+        case ModelProviderName.OPENROUTER:
+            {
+                switch (modelClass) {
+                    case ModelClass.LARGE:
+                        {
+                            model =
+                                runtime.getSetting("LARGE_OPENROUTER_MODEL") ||
+                                model;
+                        }
+                        break;
+                    case ModelClass.SMALL:
+                        {
+                            model =
+                                runtime.getSetting("SMALL_OPENROUTER_MODEL") ||
+                                model;
+                        }
+                        break;
+                }
             }
-        }
-        break;
+            break;
     }
 
     elizaLogger.info("Selected model:", model);
@@ -155,7 +176,11 @@ export async function generateText({
         case ModelProviderName.HYPERBOLIC:
         case ModelProviderName.TOGETHER: {
             elizaLogger.debug("Initializing OpenAI model.");
-            const openai = createOpenAI({ apiKey, baseURL: endpoint });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });
 
             const { text: openaiResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -176,7 +201,9 @@ export async function generateText({
         }
 
         case ModelProviderName.GOOGLE: {
-            const google = createGoogleGenerativeAI();
+            const google = createGoogleGenerativeAI({
+                fetch: runtime.fetch,
+            });
 
             const { text: googleResponse } = await aiGenerateText({
                 model: google(model),
@@ -199,7 +226,10 @@ export async function generateText({
         case ModelProviderName.ANTHROPIC: {
             elizaLogger.debug("Initializing Anthropic model.");
 
-            const anthropic = createAnthropic({ apiKey });
+            const anthropic = createAnthropic({
+                apiKey,
+                fetch: runtime.fetch,
+            });
 
             const { text: anthropicResponse } = await aiGenerateText({
                 model: anthropic.languageModel(model),
@@ -222,7 +252,10 @@ export async function generateText({
         case ModelProviderName.CLAUDE_VERTEX: {
             elizaLogger.debug("Initializing Claude Vertex model.");
 
-            const anthropic = createAnthropic({ apiKey });
+            const anthropic = createAnthropic({
+                apiKey,
+                fetch: runtime.fetch,
+            });
 
             const { text: anthropicResponse } = await aiGenerateText({
                 model: anthropic.languageModel(model),
@@ -246,7 +279,11 @@ export async function generateText({
         case ModelProviderName.GROK: {
             elizaLogger.debug("Initializing Grok model.");
 
-            const grok = createOpenAI({ apiKey, baseURL: endpoint });
+            const grok = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });
 
             const { text: grokResponse } = await aiGenerateText({
                 model: grok.languageModel(model, {
@@ -269,7 +306,7 @@ export async function generateText({
         }
 
         case ModelProviderName.GROQ: {
-            const groq = createGroq({ apiKey });
+            const groq = createGroq({ apiKey, fetch: runtime.fetch });
 
             const { text: groqResponse } = await aiGenerateText({
                 model: groq.languageModel(model),
@@ -316,7 +353,11 @@ export async function generateText({
         case ModelProviderName.REDPILL: {
             elizaLogger.debug("Initializing RedPill model.");
             const serverUrl = models[provider].endpoint;
-            const openai = createOpenAI({ apiKey, baseURL: serverUrl });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: serverUrl,
+                fetch: runtime.fetch,
+            });
 
             const { text: redpillResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -339,7 +380,11 @@ export async function generateText({
         case ModelProviderName.OPENROUTER: {
             elizaLogger.debug("Initializing OpenRouter model.");
             const serverUrl = models[provider].endpoint;
-            const openrouter = createOpenAI({ apiKey, baseURL: serverUrl });
+            const openrouter = createOpenAI({
+                apiKey,
+                baseURL: serverUrl,
+                fetch: runtime.fetch,
+            });
 
             const { text: openrouterResponse } = await aiGenerateText({
                 model: openrouter.languageModel(model),
@@ -365,6 +410,7 @@ export async function generateText({
 
             const ollamaProvider = createOllama({
                 baseURL: models[provider].endpoint + "/api",
+                fetch: runtime.fetch,
             });
             const ollama = ollamaProvider(model);
 
@@ -389,6 +435,7 @@ export async function generateText({
             const heurist = createOpenAI({
                 apiKey: apiKey,
                 baseURL: endpoint,
+                fetch: runtime.fetch,
             });
 
             const { text: heuristResponse } = await aiGenerateText({
@@ -434,7 +481,11 @@ export async function generateText({
 
             elizaLogger.debug("Using GAIANET model with baseURL:", baseURL);
 
-            const openai = createOpenAI({ apiKey, baseURL: endpoint });
+            const openai = createOpenAI({
+                apiKey,
+                baseURL: endpoint,
+                fetch: runtime.fetch,
+            });
 
             const { text: openaiResponse } = await aiGenerateText({
                 model: openai.languageModel(model),
@@ -459,6 +510,7 @@ export async function generateText({
             const galadriel = createOpenAI({
                 apiKey: apiKey,
                 baseURL: endpoint,
+                fetch: runtime.fetch,
             });
 
             const { text: galadrielResponse } = await aiGenerateText({
diff --git a/packages/core/src/types.ts b/packages/core/src/types.ts
index d29d0b98056..00787a91699 100644
--- a/packages/core/src/types.ts
+++ b/packages/core/src/types.ts
@@ -992,6 +992,8 @@ export interface IAgentRuntime {
     evaluators: Evaluator[];
     plugins: Plugin[];
 
+    fetch?: typeof fetch | null;
+
     messageManager: IMemoryManager;
     descriptionManager: IMemoryManager;
     documentsManager: IMemoryManager;
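Because `IAgentRuntime.fetch` is declared optional (`typeof fetch | null`), any function matching the global `fetch` signature can stand in for the `logFetch` wrapper above, and leaving it unset should let each provider client fall back to its default fetch. A minimal sketch of an alternative wrapper, hypothetical and not part of this diff, that enforces a per-request timeout:

```ts
// Hypothetical drop-in replacement for logFetch (not part of this diff):
// anything matching `typeof fetch` can be supplied as the runtime's fetch.
// This wrapper aborts any provider request that takes longer than 30 s.
const fetchWithTimeout: typeof fetch = async (input, init) => {
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), 30_000);
    try {
        return await fetch(input, { ...init, signal: controller.signal });
    } finally {
        clearTimeout(timer);
    }
};
```

Passed where the agent currently passes `fetch: logFetch`, a wrapper like this would be threaded through `runtime.fetch` into every provider call in `generateText`.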