Commit 2b1b2a2

🐛 fix: fix *_MODEL_LIST env in new provider (lobehub#5350)
* update

* fix model list

* fix tests

* fix tests

* improve code

* update locales

* update locales

* fix

* fix ui
arvinxx authored Jan 10, 2025
1 parent 4d25a14 commit 2b1b2a2
Showing 40 changed files with 916 additions and 105 deletions.

locales/ar/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "جارٍ تنزيل النموذج {{model}} "
     },
     "endpoint": {
-      "desc": "أدخل عنوان واجهة برمجة التطبيقات الخاص بـ Ollama، إذا لم يتم تحديده محليًا، يمكن تركه فارغًا",
+      "desc": "يجب أن تحتوي على http(s)://، يمكن تركها فارغة إذا لم يتم تحديدها محليًا",
       "title": "عنوان وكيل الواجهة"
     },
     "setup": {

locales/bg-BG/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Изтегляне на модел {{model}} "
     },
     "endpoint": {
-      "desc": "Въведете адрес на Ollama интерфейсния прокси, оставете празно, ако локално не е указано специално",
+      "desc": "Трябва да съдържа http(s)://, местният адрес може да остане празен, ако не е зададен допълнително",
       "title": "Адрес на прокси интерфейс"
     },
     "setup": {

locales/de-DE/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Lade Modell {{model}} herunter"
     },
     "endpoint": {
-      "desc": "Geben Sie die Proxy-Adresse der Ollama-Schnittstelle ein, leer lassen, wenn lokal nicht spezifiziert",
+      "desc": "Muss http(s):// enthalten, kann leer gelassen werden, wenn lokal nicht zusätzlich angegeben.",
       "title": "Schnittstellen-Proxy-Adresse"
     },
     "setup": {

locales/en-US/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Downloading model {{model}}"
     },
     "endpoint": {
-      "desc": "Enter the Ollama interface proxy address, leave blank if not specified locally",
+      "desc": "Must include http(s)://; can be left blank if not specified locally.",
       "title": "Interface proxy address"
     },
     "setup": {

locales/es-ES/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Descargando el modelo {{model}} "
     },
     "endpoint": {
-      "desc": "Introduce la dirección del proxy de la interfaz de Ollama, déjalo en blanco si no se ha especificado localmente",
+      "desc": "Debe incluir http(s)://, se puede dejar vacío si no se especifica localmente",
       "title": "Dirección del proxy de la interfaz"
     },
     "setup": {

locales/fa-IR/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "در حال دانلود مدل {{model}} "
     },
     "endpoint": {
-      "desc": "آدرس پروکسی رابط Ollama را وارد کنید، اگر به صورت محلی تنظیم نشده است، می‌توانید خالی بگذارید",
+      "desc": "باید شامل http(s):// باشد، اگر محلی به طور اضافی مشخص نشده باشد می‌توان خالی گذاشت",
       "title": "آدرس سرویس Ollama"
     },
     "setup": {

locales/fr-FR/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Téléchargement du modèle {{model}} en cours"
     },
     "endpoint": {
-      "desc": "Saisissez l'adresse du proxy Ollama, laissez vide si non spécifié localement",
+      "desc": "Doit inclure http(s)://, peut rester vide si non spécifié localement",
       "title": "Adresse du proxy"
     },
     "setup": {

locales/it-IT/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Download del modello in corso {{model}}"
     },
     "endpoint": {
-      "desc": "Inserisci l'indirizzo del proxy dell'interfaccia Ollama. Lascia vuoto se non specificato localmente",
+      "desc": "Deve includere http(s)://, può rimanere vuoto se non specificato localmente",
       "title": "Indirizzo del proxy dell'interfaccia"
     },
     "setup": {

locales/ja-JP/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "モデル{{model}}をダウンロード中"
     },
     "endpoint": {
-      "desc": "Ollamaプロキシインターフェースアドレスを入力してください。ローカルで追加の指定がない場合は空白のままにしてください",
+      "desc": "http(s)://を含める必要があります。ローカルで特に指定がない場合は空白のままで構いません",
       "title": "プロキシインターフェースアドレス"
     },
     "setup": {

locales/ko-KR/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "모델 {{model}} 다운로드 중"
     },
     "endpoint": {
-      "desc": "Ollama 인터페이스 프록시 주소를 입력하세요. 로컬에서 별도로 지정하지 않은 경우 비워둘 수 있습니다",
+      "desc": "http(s)://를 포함해야 하며, 로컬에서 추가로 지정하지 않은 경우 비워둘 수 있습니다.",
       "title": "인터페이스 프록시 주소"
     },
     "setup": {

locales/nl-NL/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Model {{model}} wordt gedownload"
     },
     "endpoint": {
-      "desc": "Voer het Ollama interface proxyadres in, laat leeg indien niet specifiek aangegeven",
+      "desc": "Moet http(s):// bevatten, kan leeg gelaten worden als lokaal niet specifiek opgegeven",
       "title": "Interface Proxyadres"
     },
     "setup": {

locales/pl-PL/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Pobieranie modelu {{model}}"
     },
     "endpoint": {
-      "desc": "Wprowadź adres rest API Ollama, jeśli lokalnie nie określono, pozostaw puste",
+      "desc": "Musi zawierać http(s)://, lokalnie, jeśli nie określono inaczej, można pozostawić puste",
       "title": "Adres proxy API"
     },
     "setup": {

locales/pt-BR/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Baixando o modelo {{model}} "
     },
     "endpoint": {
-      "desc": "Insira o endereço do proxy de interface da Ollama, se não foi especificado localmente, pode deixar em branco",
+      "desc": "Deve incluir http(s)://, pode deixar em branco se não houver especificação local adicional",
       "title": "Endereço do Proxy de Interface"
     },
     "setup": {

locales/ru-RU/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Загрузка модели {{model}} "
     },
     "endpoint": {
-      "desc": "Введите адрес прокси-интерфейса Ollama, если локально не указано иное, можете оставить пустым",
+      "desc": "Должен содержать http(s)://, если локально не указано иное, можно оставить пустым",
       "title": "Адрес прокси-интерфейса"
     },
     "setup": {

locales/tr-TR/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "正在下载模型 {{model}} "
     },
     "endpoint": {
-      "desc": "Ollama arayüz proxy adresini girin, yerel olarak belirtilmemişse boş bırakılabilir",
+      "desc": "http(s):// içermelidir, yerel olarak belirtilmemişse boş bırakılabilir",
       "title": "Arayüz Proxy Adresi"
     },
     "setup": {

locales/vi-VN/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "Đang tải mô hình {{model}}"
     },
     "endpoint": {
-      "desc": "Nhập địa chỉ proxy API của Ollama, có thể để trống nếu không chỉ định cụ thể",
+      "desc": "Phải bao gồm http(s)://, có thể để trống nếu không chỉ định thêm cho địa phương",
       "title": "Địa chỉ proxy API"
     },
     "setup": {

locales/zh-CN/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "正在下载模型 {{model}} "
     },
     "endpoint": {
-      "desc": "填入 Ollama 接口代理地址,本地未额外指定可留空",
+      "desc": "必须包含http(s)://,本地未额外指定可留空",
       "title": "Ollama 服务地址"
     },
     "setup": {

locales/zh-TW/modelProvider.json (1 addition, 1 deletion)

@@ -151,7 +151,7 @@
       "title": "正在下載模型 {{model}}"
     },
     "endpoint": {
-      "desc": "填入 Ollama 接口代理地址,本地未額外指定可留空",
+      "desc": "必須包含http(s)://,本地未額外指定可留空",
       "title": "接口代理地址"
     },
     "setup": {

package.json (2 additions, 2 deletions)

@@ -129,7 +129,7 @@
     "@lobehub/chat-plugins-gateway": "^1.9.0",
     "@lobehub/icons": "^1.61.1",
     "@lobehub/tts": "^1.28.0",
-    "@lobehub/ui": "^1.163.0",
+    "@lobehub/ui": "^1.164.2",
     "@neondatabase/serverless": "^0.10.4",
     "@next/third-parties": "^15.1.4",
     "@react-spring/web": "^9.7.5",
@@ -244,7 +244,7 @@
     "@edge-runtime/vm": "^5.0.0",
     "@huggingface/tasks": "^0.13.13",
     "@lobehub/i18n-cli": "^1.20.3",
-    "@lobehub/lint": "^1.25.3",
+    "@lobehub/lint": "^1.25.5",
     "@lobehub/seo-cli": "^1.4.3",
     "@next/bundle-analyzer": "^15.1.4",
     "@next/eslint-plugin-next": "^15.1.4",

src/config/aiModels/index.ts (38 additions)

@@ -96,3 +96,41 @@ export const LOBE_DEFAULT_MODEL_LIST = buildDefaultModelList({
   zeroone,
   zhipu,
 });
+
+export { default as ai21 } from './ai21';
+export { default as ai360 } from './ai360';
+export { default as anthropic } from './anthropic';
+export { default as azure } from './azure';
+export { default as baichuan } from './baichuan';
+export { default as bedrock } from './bedrock';
+export { default as cloudflare } from './cloudflare';
+export { default as deepseek } from './deepseek';
+export { default as fireworksai } from './fireworksai';
+export { default as giteeai } from './giteeai';
+export { default as github } from './github';
+export { default as google } from './google';
+export { default as groq } from './groq';
+export { default as higress } from './higress';
+export { default as huggingface } from './huggingface';
+export { default as hunyuan } from './hunyuan';
+export { default as internlm } from './internlm';
+export { default as minimax } from './minimax';
+export { default as mistral } from './mistral';
+export { default as moonshot } from './moonshot';
+export { default as novita } from './novita';
+export { default as ollama } from './ollama';
+export { default as openai } from './openai';
+export { default as openrouter } from './openrouter';
+export { default as perplexity } from './perplexity';
+export { default as qwen } from './qwen';
+export { default as sensenova } from './sensenova';
+export { default as siliconcloud } from './siliconcloud';
+export { default as spark } from './spark';
+export { default as stepfun } from './stepfun';
+export { default as taichu } from './taichu';
+export { default as togetherai } from './togetherai';
+export { default as upstage } from './upstage';
+export { default as wenxin } from './wenxin';
+export { default as xai } from './xai';
+export { default as zeroone } from './zeroone';
+export { default as zhipu } from './zhipu';
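
Note: the point of these new named re-exports is to make the builtin model tables addressable by provider id through a namespace import. A minimal sketch of that usage, mirroring what the new genServerAiProviderConfig.ts below does (the path alias and types come from this repo; the provider id is just an example):

import * as AiModels from '@/config/aiModels';
import { AiFullModelCard } from '@/types/aiModel';

// Index the namespace by provider id to get that provider's builtin models,
// then keep only chat models, as the server config generator does below.
const builtinModels = AiModels['ollama'] as AiFullModelCard[];
const chatModels = builtinModels.filter((m) => m.type === 'chat');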

src/config/modelProviders/index.ts (3 additions)

@@ -38,6 +38,9 @@ import XAIProvider from './xai';
 import ZeroOneProvider from './zeroone';
 import ZhiPuProvider from './zhipu';
 
+/**
+ * @deprecated
+ */
 export const LOBE_DEFAULT_MODEL_LIST: ChatModelCard[] = [
   OpenAIProvider.chatModels,
   QwenProvider.chatModels,

src/database/repositories/aiInfra/index.ts (3 additions, 1 deletion)

@@ -120,7 +120,9 @@
   ): Promise<AiProviderModelListItem[] | undefined> => {
     try {
       const { default: providerModels } = await import(`@/config/aiModels/${providerId}`);
-      return (providerModels as AIChatModelCard[]).map<AiProviderModelListItem>((m) => ({
+
+      const presetList = this.providerConfigs[providerId]?.serverModelLists || providerModels;
+      return (presetList as AIChatModelCard[]).map<AiProviderModelListItem>((m) => ({
         ...m,
         enabled: m.enabled || false,
         source: AiModelSourceEnum.Builtin,
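
Note: the presetList fallback above is the heart of this fix: builtin model lists now prefer the server-side serverModelLists (built from the *_MODEL_LIST env vars) over the static preset. A simplified sketch of the resulting behavior, with stand-in types for the real AIChatModelCard / AiProviderModelListItem:

type ModelCard = { id: string; enabled?: boolean };
type BuiltinModelListItem = ModelCard & { enabled: boolean; source: 'builtin' };

// Prefer the server-rendered list when the provider config carries one;
// otherwise fall back to the builtin preset from @/config/aiModels.
const buildBuiltinModelList = (
  serverModelLists: ModelCard[] | undefined,
  providerModels: ModelCard[],
): BuiltinModelListItem[] =>
  (serverModelLists || providerModels).map((m) => ({
    ...m,
    enabled: m.enabled || false,
    source: 'builtin',
  }));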

(file path not shown in this view)

@@ -279,6 +279,7 @@
         return models.transformModel(item);
       }
 
+      // TODO: should refactor after remove v1 user/modelList code
       const knownModel = LOBE_DEFAULT_MODEL_LIST.find((model) => model.id === item.id);
 
       if (knownModel) {

src/locales/default/modelProvider.ts (1 addition, 1 deletion)

@@ -152,7 +152,7 @@ export default {
       title: '正在下载模型 {{model}} ',
     },
     endpoint: {
-      desc: '填入 Ollama 接口代理地址,本地未额外指定可留空',
+      desc: '必须包含http(s)://,本地未额外指定可留空',
       title: 'Ollama 服务地址',
     },
     setup: {

src/migrations/FromV3ToV4/index.ts (1 addition, 1 deletion)

@@ -1,5 +1,5 @@
 import type { Migration, MigrationData } from '@/migrations/VersionController';
-import { transformToChatModelCards } from '@/utils/parseModels';
+import { transformToChatModelCards } from '@/utils/_deprecated/parseModels';
 
 import { V3ConfigState, V3LegacyConfig, V3OpenAIConfig, V3Settings } from './types/v3';
 import { V4AzureOpenAIConfig, V4ConfigState, V4ProviderConfig, V4Settings } from './types/v4';

(file path not shown in this view)

@@ -1,8 +1,6 @@
 import { describe, expect, it, vi } from 'vitest';
 
-import { getLLMConfig } from '@/config/llm';
-
-import { genServerLLMConfig } from './genServerLLMConfig';
+import { genServerLLMConfig } from './_deprecated';
 
 // Mock ModelProvider enum
 vi.mock('@/libs/agent-runtime', () => ({
@@ -40,7 +38,7 @@ vi.mock('@/config/llm', () => ({
 }));
 
 // Mock parse models utils
-vi.mock('@/utils/parseModels', () => ({
+vi.mock('@/utils/_deprecated/parseModels', () => ({
   extractEnabledModels: (modelString: string, withDeploymentName?: boolean) => {
     // Returns different format if withDeploymentName is true
     return withDeploymentName ? [`${modelString}_withDeployment`] : [modelString];

(file path not shown in this view)

@@ -2,7 +2,7 @@ import { getLLMConfig } from '@/config/llm';
 import * as ProviderCards from '@/config/modelProviders';
 import { ModelProvider } from '@/libs/agent-runtime';
 import { ModelProviderCard } from '@/types/llm';
-import { extractEnabledModels, transformToChatModelCards } from '@/utils/parseModels';
+import { extractEnabledModels, transformToChatModelCards } from '@/utils/_deprecated/parseModels';
 
 export const genServerLLMConfig = (specificConfig: Record<any, any>) => {
   const llmConfig = getLLMConfig() as Record<string, any>;

src/server/globalConfig/genServerAiProviderConfig.ts (new file, 42 additions)

@@ -0,0 +1,42 @@
+import * as AiModels from '@/config/aiModels';
+import { getLLMConfig } from '@/config/llm';
+import { ModelProvider } from '@/libs/agent-runtime';
+import { AiFullModelCard } from '@/types/aiModel';
+import { ProviderConfig } from '@/types/user/settings';
+import { extractEnabledModels, transformToAiChatModelList } from '@/utils/parseModels';
+
+export const genServerAiProvidersConfig = (specificConfig: Record<any, any>) => {
+  const llmConfig = getLLMConfig() as Record<string, any>;
+
+  return Object.values(ModelProvider).reduce(
+    (config, provider) => {
+      const providerUpperCase = provider.toUpperCase();
+      const providerCard = AiModels[provider] as AiFullModelCard[];
+      const providerConfig = specificConfig[provider as keyof typeof specificConfig] || {};
+      const providerModelList =
+        process.env[providerConfig.modelListKey ?? `${providerUpperCase}_MODEL_LIST`];
+
+      const defaultChatModels = providerCard.filter((c) => c.type === 'chat');
+
+      config[provider] = {
+        enabled: llmConfig[providerConfig.enabledKey || `ENABLED_${providerUpperCase}`],
+        enabledModels: extractEnabledModels(
+          providerModelList,
+          providerConfig.withDeploymentName || false,
+        ),
+        serverModelLists: transformToAiChatModelList({
+          defaultChatModels: defaultChatModels || [],
+          modelString: providerModelList,
+          providerId: provider,
+          withDeploymentName: providerConfig.withDeploymentName || false,
+        }),
+        ...(providerConfig.fetchOnClient !== undefined && {
+          fetchOnClient: providerConfig.fetchOnClient,
+        }),
+      };
+
+      return config;
+    },
+    {} as Record<string, ProviderConfig>,
+  );
};
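
Note: the *_MODEL_LIST strings this new module reads follow LobeChat's documented model-list syntax ("+id" shows a model, "-id" hides one, "-all" clears the defaults). A minimal TypeScript sketch of that resolution under those assumptions; the real parsing lives in extractEnabledModels / transformToAiChatModelList:

// Sketch only, not the actual implementation: resolve a string such as
// OPENAI_MODEL_LIST="-all,+gpt-4o,+o1-mini" into the enabled model ids.
const resolveModelList = (modelString: string, defaults: string[]): string[] => {
  let enabled = new Set(defaults);
  for (const raw of modelString.split(',')) {
    const token = raw.trim();
    if (!token) continue;
    if (token === '-all') enabled = new Set<string>(); // hide every default model
    else if (token.startsWith('-')) enabled.delete(token.slice(1));
    else enabled.add(token.startsWith('+') ? token.slice(1) : token);
  }
  return [...enabled];
};

// resolveModelList('-all,+gpt-4o,+o1-mini', ['gpt-4o-mini'])
// => ['gpt-4o', 'o1-mini']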

src/server/globalConfig/index.ts (23 additions, 1 deletion)

@@ -6,19 +6,41 @@ import { enableNextAuth } from '@/const/auth';
 import { parseSystemAgent } from '@/server/globalConfig/parseSystemAgent';
 import { GlobalServerConfig } from '@/types/serverConfig';
 
-import { genServerLLMConfig } from './genServerLLMConfig';
+import { genServerLLMConfig } from './_deprecated';
+import { genServerAiProvidersConfig } from './genServerAiProviderConfig';
 import { parseAgentConfig } from './parseDefaultAgent';
 
 export const getServerGlobalConfig = () => {
   const { ACCESS_CODES, DEFAULT_AGENT_CONFIG } = getAppConfig();
 
   const config: GlobalServerConfig = {
+    aiProvider: genServerAiProvidersConfig({
+      azure: {
+        enabledKey: 'ENABLED_AZURE_OPENAI',
+        withDeploymentName: true,
+      },
+      bedrock: {
+        enabledKey: 'ENABLED_AWS_BEDROCK',
+        modelListKey: 'AWS_BEDROCK_MODEL_LIST',
+      },
+      giteeai: {
+        enabledKey: 'ENABLED_GITEE_AI',
+        modelListKey: 'GITEE_AI_MODEL_LIST',
+      },
+      ollama: {
+        fetchOnClient: !process.env.OLLAMA_PROXY_URL,
+      },
+    }),
     defaultAgent: {
       config: parseAgentConfig(DEFAULT_AGENT_CONFIG),
    },
     enableUploadFileToServer: !!fileEnv.S3_SECRET_ACCESS_KEY,
     enabledAccessCode: ACCESS_CODES?.length > 0,
+
     enabledOAuthSSO: enableNextAuth,
+    /**
+     * @deprecated
+     */
     languageModel: genServerLLMConfig({
       azure: {
         enabledKey: 'ENABLED_AZURE_OPENAI',
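
Note: the per-provider overrides passed above exist because a few providers do not follow the default ENABLED_<PROVIDER> / <PROVIDER>_MODEL_LIST naming. A small sketch of the resolution rule, mirroring the defaulting inside genServerAiProvidersConfig (names simplified):

interface ProviderEnvOverrides {
  enabledKey?: string;
  modelListKey?: string;
}

// Overrides win when present; otherwise env names derive from the provider id.
const envKeysFor = (provider: string, overrides: ProviderEnvOverrides = {}) => ({
  enabledKey: overrides.enabledKey ?? `ENABLED_${provider.toUpperCase()}`,
  modelListKey: overrides.modelListKey ?? `${provider.toUpperCase()}_MODEL_LIST`,
});

// envKeysFor('bedrock', { enabledKey: 'ENABLED_AWS_BEDROCK', modelListKey: 'AWS_BEDROCK_MODEL_LIST' })
//   => { enabledKey: 'ENABLED_AWS_BEDROCK', modelListKey: 'AWS_BEDROCK_MODEL_LIST' }
// envKeysFor('openai')
//   => { enabledKey: 'ENABLED_OPENAI', modelListKey: 'OPENAI_MODEL_LIST' }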

src/server/routers/lambda/aiModel.ts (2 additions, 2 deletions)

@@ -19,14 +19,14 @@ const aiModelProcedure = authedProcedure.use(async (opts) => {
   const { ctx } = opts;
 
   const gateKeeper = await KeyVaultsGateKeeper.initWithEnvKey();
-  const { languageModel } = getServerGlobalConfig();
+  const { aiProvider } = getServerGlobalConfig();
 
   return opts.next({
     ctx: {
       aiInfraRepos: new AiInfraRepos(
         serverDB,
         ctx.userId,
-        languageModel as Record<string, ProviderConfig>,
+        aiProvider as Record<string, ProviderConfig>,
       ),
       aiModelModel: new AiModelModel(serverDB, ctx.userId),
       gateKeeper,
(Remaining changed files not shown.)

0 comments on commit 2b1b2a2
