diff --git a/packages/ai-core/src/browser/ai-core-preferences.ts b/packages/ai-core/src/browser/ai-core-preferences.ts
index 970b1c485368f..2250931338951 100644
--- a/packages/ai-core/src/browser/ai-core-preferences.ts
+++ b/packages/ai-core/src/browser/ai-core-preferences.ts
@@ -21,6 +21,7 @@ import { interfaces } from '@theia/core/shared/inversify';
 export const AI_CORE_PREFERENCES_TITLE = '✨ AI Features [Experimental]';
 export const PREFERENCE_NAME_ENABLE_EXPERIMENTAL = 'ai-features.AiEnable.enableAI';
 export const PREFERENCE_NAME_PROMPT_TEMPLATES = 'ai-features.promptTemplates.promptTemplatesFolder';
+export const PREFERENCE_NAME_REQUEST_SETTINGS = 'ai-features.modelSettings.requestSettings';
 
 export const aiCorePreferenceSchema: PreferenceSchema = {
     type: 'object',
@@ -55,13 +56,51 @@ export const aiCorePreferenceSchema: PreferenceSchema = {
                 canSelectMany: false
             }
         },
-
+    },
+    [PREFERENCE_NAME_REQUEST_SETTINGS]: {
+        title: 'Custom Request Settings',
+        markdownDescription: 'Allows specifying custom request settings for multiple models.\n\
+        Each object represents the configuration for a specific model: the `modelId` field specifies the model ID, and `requestSettings` defines the model-specific settings.\n\
+        The `providerId` field is optional and allows you to apply the settings to a specific provider. If not set, the settings will be applied to all providers.\n\
+        Example providerIds: huggingface, openai, ollama, llamafile.\n\
+        Refer to [our documentation](https://theia-ide.org/docs/user_ai/#custom-request-settings) for more information.',
+        type: 'array',
+        items: {
+            type: 'object',
+            properties: {
+                modelId: {
+                    type: 'string',
+                    description: 'The model ID.'
+                },
+                requestSettings: {
+                    type: 'object',
+                    additionalProperties: true,
+                    description: 'Settings for the specific model ID.',
+                },
+                providerId: {
+                    type: 'string',
+                    description: 'The (optional) provider ID to apply the settings to. If not set, the settings will be applied to all providers.',
+                },
+            },
+        },
+        default: [],
+    }
     }
 };
 
 export interface AICoreConfiguration {
     [PREFERENCE_NAME_ENABLE_EXPERIMENTAL]: boolean | undefined;
     [PREFERENCE_NAME_PROMPT_TEMPLATES]: string | undefined;
+    [PREFERENCE_NAME_REQUEST_SETTINGS]: Array<{
+        modelId: string;
+        requestSettings?: { [key: string]: unknown };
+        providerId?: string;
+    }> | undefined;
+}
+
+export interface RequestSetting {
+    modelId: string;
+    requestSettings?: { [key: string]: unknown };
+    providerId?: string;
 }
 
 export const AICorePreferences = Symbol('AICorePreferences');
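For reference, here is what a value of the new `ai-features.modelSettings.requestSettings` preference can look like, expressed against the `RequestSetting` interface added above (model IDs and option names are illustrative; which options are honored depends on the provider):

```ts
// Illustrative preference value, typed with the RequestSetting interface above.
const exampleRequestSettings: RequestSetting[] = [
    // providerId set: applies only to the Ollama model 'llama3'.
    { modelId: 'llama3', providerId: 'ollama', requestSettings: { temperature: 0.2 } },
    // providerId omitted: applies to every provider that serves a model with this ID.
    { modelId: 'gpt-4o', requestSettings: { max_tokens: 1024 } }
];
```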
diff --git a/packages/ai-core/src/common/language-model.ts b/packages/ai-core/src/common/language-model.ts
index 2945e9ff0d05a..4c507b74bdfcc 100644
--- a/packages/ai-core/src/common/language-model.ts
+++ b/packages/ai-core/src/common/language-model.ts
@@ -107,6 +107,11 @@ export interface LanguageModelMetaData {
     readonly family?: string;
     readonly maxInputTokens?: number;
     readonly maxOutputTokens?: number;
+    /**
+     * Default request settings for the language model. These settings can be set via user preferences.
+     * Settings in a request will override these default settings.
+     */
+    readonly defaultRequestSettings?: { [key: string]: unknown };
 }
 
 export namespace LanguageModelMetaData {
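Note that the precedence described in this comment is whole-object, not key-by-key: as the provider implementations below show, a request that carries any settings of its own bypasses the preference-derived defaults entirely. A minimal sketch of the rule (function name is illustrative):

```ts
// Request settings replace the model's defaults wholesale; nothing is merged.
function effectiveSettings(
    requestSettings: { [key: string]: unknown } | undefined,
    defaultRequestSettings: { [key: string]: unknown } | undefined
): { [key: string]: unknown } {
    return requestSettings ?? defaultRequestSettings ?? {};
}
```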
diff --git a/packages/ai-hugging-face/src/browser/huggingface-frontend-application-contribution.ts b/packages/ai-hugging-face/src/browser/huggingface-frontend-application-contribution.ts
index 8c595133b4464..ad9cb783d01dc 100644
--- a/packages/ai-hugging-face/src/browser/huggingface-frontend-application-contribution.ts
+++ b/packages/ai-hugging-face/src/browser/huggingface-frontend-application-contribution.ts
@@ -18,7 +18,9 @@ import { FrontendApplicationContribution, PreferenceService } from '@theia/core/
 import { inject, injectable } from '@theia/core/shared/inversify';
 import { HuggingFaceLanguageModelsManager, HuggingFaceModelDescription } from '../common';
 import { API_KEY_PREF, MODELS_PREF } from './huggingface-preferences';
+import { PREFERENCE_NAME_REQUEST_SETTINGS, RequestSetting } from '@theia/ai-core/lib/browser/ai-core-preferences';
 
+const HUGGINGFACE_PROVIDER_ID = 'huggingface';
 @injectable()
 export class HuggingFaceFrontendApplicationContribution implements FrontendApplicationContribution {
 
@@ -36,31 +38,58 @@ export class HuggingFaceFrontendApplicationContribution implements FrontendAppli
             this.manager.setApiKey(apiKey);
 
             const models = this.preferenceService.get<string[]>(MODELS_PREF, []);
-            this.manager.createOrUpdateLanguageModels(...models.map(createHuggingFaceModelDescription));
+            const requestSettings = this.preferenceService.get<RequestSetting[]>(PREFERENCE_NAME_REQUEST_SETTINGS, []);
+            this.manager.createOrUpdateLanguageModels(...models.map(modelId => this.createHuggingFaceModelDescription(modelId, requestSettings)));
             this.prevModels = [...models];
 
             this.preferenceService.onPreferenceChanged(event => {
                 if (event.preferenceName === API_KEY_PREF) {
                     this.manager.setApiKey(event.newValue);
                 } else if (event.preferenceName === MODELS_PREF) {
-                    const oldModels = new Set(this.prevModels);
-                    const newModels = new Set(event.newValue as string[]);
-
-                    const modelsToRemove = [...oldModels].filter(model => !newModels.has(model));
-                    const modelsToAdd = [...newModels].filter(model => !oldModels.has(model));
-
-                    this.manager.removeLanguageModels(...modelsToRemove.map(model => `huggingface/${model}`));
-                    this.manager.createOrUpdateLanguageModels(...modelsToAdd.map(createHuggingFaceModelDescription));
-                    this.prevModels = [...event.newValue];
+                    this.handleModelChanges(event.newValue as string[]);
+                } else if (event.preferenceName === PREFERENCE_NAME_REQUEST_SETTINGS) {
+                    this.handleRequestSettingsChanges(event.newValue as RequestSetting[]);
                 }
             });
         });
     }
-}
 
-function createHuggingFaceModelDescription(modelId: string): HuggingFaceModelDescription {
-    return {
-        id: `huggingface/${modelId}`,
-        model: modelId
-    };
+    protected handleModelChanges(newModels: string[]): void {
+        const oldModels = new Set(this.prevModels);
+        const updatedModels = new Set(newModels);
+
+        const modelsToRemove = [...oldModels].filter(model => !updatedModels.has(model));
+        const modelsToAdd = [...updatedModels].filter(model => !oldModels.has(model));
+
+        this.manager.removeLanguageModels(...modelsToRemove.map(model => `${HUGGINGFACE_PROVIDER_ID}/${model}`));
+        const requestSettings = this.preferenceService.get<RequestSetting[]>(PREFERENCE_NAME_REQUEST_SETTINGS, []);
+        this.manager.createOrUpdateLanguageModels(...modelsToAdd.map(modelId => this.createHuggingFaceModelDescription(modelId, requestSettings)));
+        this.prevModels = newModels;
+    }
+
+    protected handleRequestSettingsChanges(newSettings: RequestSetting[]): void {
+        const models = this.preferenceService.get<string[]>(MODELS_PREF, []);
+        this.manager.createOrUpdateLanguageModels(...models.map(modelId => this.createHuggingFaceModelDescription(modelId, newSettings)));
+    }
+
+    protected createHuggingFaceModelDescription(
+        modelId: string,
+        requestSettings: RequestSetting[]
+    ): HuggingFaceModelDescription {
+        const id = `${HUGGINGFACE_PROVIDER_ID}/${modelId}`;
+        const matchingSettings = requestSettings.filter(
+            setting => (!setting.providerId || setting.providerId === HUGGINGFACE_PROVIDER_ID) && setting.modelId === modelId
+        );
+        if (matchingSettings.length > 1) {
+            console.warn(
+                `Multiple entries found for modelId "${modelId}". Using the first match and ignoring the rest.`
+            );
+        }
+        const modelRequestSetting = matchingSettings[0];
+        return {
+            id: id,
+            model: modelId,
+            defaultRequestSettings: modelRequestSetting?.requestSettings
+        };
+    }
 }
diff --git a/packages/ai-hugging-face/src/common/huggingface-language-models-manager.ts b/packages/ai-hugging-face/src/common/huggingface-language-models-manager.ts
index a19937301e241..21a3c85550e78 100644
--- a/packages/ai-hugging-face/src/common/huggingface-language-models-manager.ts
+++ b/packages/ai-hugging-face/src/common/huggingface-language-models-manager.ts
@@ -26,6 +26,10 @@ export interface HuggingFaceModelDescription {
      * The model ID as used by the Hugging Face API.
      */
     model: string;
+    /**
+     * Default request settings for the Hugging Face model.
+     */
+    defaultRequestSettings?: { [key: string]: unknown };
 }
 
 export interface HuggingFaceLanguageModelsManager {
diff --git a/packages/ai-hugging-face/src/node/huggingface-language-model.ts b/packages/ai-hugging-face/src/node/huggingface-language-model.ts
index d5b424aaadf4a..2758f588bca52 100644
--- a/packages/ai-hugging-face/src/node/huggingface-language-model.ts
+++ b/packages/ai-hugging-face/src/node/huggingface-language-model.ts
@@ -55,8 +55,18 @@ export class HuggingFaceModel implements LanguageModel {
      * @param model the model id as it is used by the Hugging Face API
      * @param apiKey function to retrieve the API key for Hugging Face
      */
-    constructor(public readonly id: string, public model: string, public apiKey: () => string | undefined) {
-    }
+    constructor(
+        public readonly id: string,
+        public model: string,
+        public apiKey: () => string | undefined,
+        public readonly name?: string,
+        public readonly vendor?: string,
+        public readonly version?: string,
+        public readonly family?: string,
+        public readonly maxInputTokens?: number,
+        public readonly maxOutputTokens?: number,
+        public defaultRequestSettings?: Record<string, unknown>
+    ) { }
 
     async request(request: LanguageModelRequest, cancellationToken?: CancellationToken): Promise<LanguageModelResponse> {
         const hfInference = this.initializeHfInference();
@@ -67,15 +77,16 @@ export class HuggingFaceModel implements LanguageModel {
         }
     }
 
-    protected getDefaultSettings(): Record<string, unknown> {
-        return {
-            max_new_tokens: 2024,
-            stop: ['<|endoftext|>', '</s>']
-        };
+    protected getSettings(request: LanguageModelRequest): Record<string, unknown> {
+        const settings = request.settings ? request.settings : this.defaultRequestSettings;
+        if (!settings) {
+            return {};
+        }
+        return settings;
     }
 
     protected async handleNonStreamingRequest(hfInference: HfInference, request: LanguageModelRequest): Promise<LanguageModelResponse> {
-        const settings = request.settings || this.getDefaultSettings();
+        const settings = this.getSettings(request);
 
         const response = await hfInference.textGeneration({
             model: this.model,
@@ -104,7 +115,8 @@ export class HuggingFaceModel implements LanguageModel {
         request: LanguageModelRequest,
         cancellationToken?: CancellationToken
     ): Promise<LanguageModelStreamResponse> {
-        const settings = request.settings || this.getDefaultSettings();
+
+        const settings = this.getSettings(request);
 
         const stream = hfInference.textGenerationStream({
             model: this.model,
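A behavioral consequence of this change: the previously hard-coded Hugging Face defaults are gone, so a model with neither request settings nor a matching preference entry now sends no generation parameters at all. If the old behavior is wanted, it can presumably be recreated via the new preference, e.g.:

```ts
// Hypothetical preference entry restoring the removed defaults (model ID illustrative).
const hfDefaults: RequestSetting = {
    modelId: 'mistralai/Mistral-7B-Instruct-v0.3',
    providerId: 'huggingface',
    requestSettings: { max_new_tokens: 2024, stop: ['<|endoftext|>', '</s>'] }
};
```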
diff --git a/packages/ai-hugging-face/src/node/huggingface-language-models-manager-impl.ts b/packages/ai-hugging-face/src/node/huggingface-language-models-manager-impl.ts
index 85073968a628b..e0c180cd7c961 100644
--- a/packages/ai-hugging-face/src/node/huggingface-language-models-manager-impl.ts
+++ b/packages/ai-hugging-face/src/node/huggingface-language-models-manager-impl.ts
@@ -43,8 +43,22 @@ export class HuggingFaceLanguageModelsManagerImpl implements HuggingFaceLanguage
             }
             model.model = modelDescription.model;
             model.apiKey = apiKeyProvider;
+            model.defaultRequestSettings = modelDescription.defaultRequestSettings;
         } else {
-            this.languageModelRegistry.addLanguageModels([new HuggingFaceModel(modelDescription.id, modelDescription.model, apiKeyProvider)]);
+            this.languageModelRegistry.addLanguageModels([
+                new HuggingFaceModel(
+                    modelDescription.id,
+                    modelDescription.model,
+                    apiKeyProvider,
+                    undefined,
+                    undefined,
+                    undefined,
+                    undefined,
+                    undefined,
+                    undefined,
+                    modelDescription.defaultRequestSettings
+                )
+            ]);
         }
     }
 }
diff --git a/packages/ai-llamafile/src/browser/llamafile-command-contribution.ts b/packages/ai-llamafile/src/browser/llamafile-command-contribution.ts
index eae616cfd94bd..cc7eff04d060a 100644
--- a/packages/ai-llamafile/src/browser/llamafile-command-contribution.ts
+++ b/packages/ai-llamafile/src/browser/llamafile-command-contribution.ts
@@ -17,8 +17,9 @@ import { AICommandHandlerFactory } from '@theia/ai-core/lib/browser/ai-command-h
 import { CommandContribution, CommandRegistry, MessageService } from '@theia/core';
 import { PreferenceService, QuickInputService } from '@theia/core/lib/browser';
 import { inject, injectable } from '@theia/core/shared/inversify';
-import { LlamafileEntry, LlamafileManager } from '../common/llamafile-manager';
+import { LlamafileManager } from '../common/llamafile-manager';
 import { PREFERENCE_LLAMAFILE } from './llamafile-preferences';
+import { LlamafileEntry } from './llamafile-frontend-application-contribution';
 
 export const StartLlamafileCommand = {
     id: 'llamafile.start',
diff --git a/packages/ai-llamafile/src/browser/llamafile-frontend-application-contribution.ts b/packages/ai-llamafile/src/browser/llamafile-frontend-application-contribution.ts
index c202ca12fe54e..e1f144acc2979 100644
--- a/packages/ai-llamafile/src/browser/llamafile-frontend-application-contribution.ts
+++ b/packages/ai-llamafile/src/browser/llamafile-frontend-application-contribution.ts
@@ -16,9 +16,11 @@
 
 import { FrontendApplicationContribution, PreferenceService } from '@theia/core/lib/browser';
 import { inject, injectable } from '@theia/core/shared/inversify';
-import { LlamafileEntry, LlamafileManager } from '../common/llamafile-manager';
+import { LlamafileManager, LlamafileModelDescription } from '../common/llamafile-manager';
 import { PREFERENCE_LLAMAFILE } from './llamafile-preferences';
+import { PREFERENCE_NAME_REQUEST_SETTINGS, RequestSetting } from '@theia/ai-core/lib/browser/ai-core-preferences';
 
+const LLAMAFILE_PROVIDER_ID = 'llamafile';
 @injectable()
 export class LlamafileFrontendApplicationContribution implements FrontendApplicationContribution {
 
@@ -33,27 +35,92 @@ export class LlamafileFrontendApplicationContribution implements FrontendApplica
     onStart(): void {
         this.preferenceService.ready.then(() => {
             const llamafiles = this.preferenceService.get<LlamafileEntry[]>(PREFERENCE_LLAMAFILE, []);
-            this.llamafileManager.addLanguageModels(llamafiles);
-            llamafiles.forEach(model => this._knownLlamaFiles.set(model.name, model));
+            const validLlamafiles = llamafiles.filter(LlamafileEntry.is);
+
+            const llamafileModelDescriptions = this.getLlamafileModelDescriptions(validLlamafiles);
+
+            this.llamafileManager.addLanguageModels(llamafileModelDescriptions);
+            validLlamafiles.forEach(model => this._knownLlamaFiles.set(model.name, model));
 
             this.preferenceService.onPreferenceChanged(event => {
                 if (event.preferenceName === PREFERENCE_LLAMAFILE) {
-                    // only new models which are actual LLamaFileEntries
                     const newModels = event.newValue.filter((llamafileEntry: unknown) => LlamafileEntry.is(llamafileEntry)) as LlamafileEntry[];
+                    this.handleLlamaFilePreferenceChange(newModels);
+                } else if (event.preferenceName === PREFERENCE_NAME_REQUEST_SETTINGS) {
+                    this.handleRequestSettingsChange(event.newValue as RequestSetting[]);
+                }
+            });
+        });
+    }
 
-                    const llamafilesToAdd = newModels.filter(llamafile =>
-                        !this._knownLlamaFiles.has(llamafile.name) || !LlamafileEntry.equals(this._knownLlamaFiles.get(llamafile.name)!, llamafile));
+    protected getLlamafileModelDescriptions(llamafiles: LlamafileEntry[]): LlamafileModelDescription[] {
+        const requestSettings = this.preferenceService.get<RequestSetting[]>(PREFERENCE_NAME_REQUEST_SETTINGS, []);
+        return llamafiles.map(llamafile => {
+            const matchingSettings = requestSettings.filter(
+                setting =>
+                    (!setting.providerId || setting.providerId === LLAMAFILE_PROVIDER_ID) &&
+                    setting.modelId === llamafile.name
+            );
+            if (matchingSettings.length > 1) {
+                console.warn(`Multiple entries found for model "${llamafile.name}". Using the first match.`);
+            }
+            return {
+                name: llamafile.name,
+                uri: llamafile.uri,
+                port: llamafile.port,
+                defaultRequestSettings: matchingSettings[0]?.requestSettings
+            };
+        });
+    }
 
-                    const llamafileIdsToRemove = [...this._knownLlamaFiles.values()].filter(llamafile =>
-                        !newModels.find(a => LlamafileEntry.equals(a, llamafile))).map(a => a.name);
+    protected handleLlamaFilePreferenceChange(newModels: LlamafileEntry[]): void {
+        const llamafilesToAdd = newModels.filter(llamafile =>
+            !this._knownLlamaFiles.has(llamafile.name) ||
+            !LlamafileEntry.equals(this._knownLlamaFiles.get(llamafile.name)!, llamafile));
 
-                    this.llamafileManager.removeLanguageModels(llamafileIdsToRemove);
-                    llamafileIdsToRemove.forEach(model => this._knownLlamaFiles.delete(model));
+        const llamafileIdsToRemove = [...this._knownLlamaFiles.values()].filter(llamafile =>
+            !newModels.find(newModel => LlamafileEntry.equals(newModel, llamafile)))
+            .map(llamafile => llamafile.name);
 
-                    this.llamafileManager.addLanguageModels(llamafilesToAdd);
-                    llamafilesToAdd.forEach(model => this._knownLlamaFiles.set(model.name, model));
-                }
-            });
+        this.llamafileManager.removeLanguageModels(llamafileIdsToRemove);
+        llamafileIdsToRemove.forEach(id => this._knownLlamaFiles.delete(id));
+
+        this.llamafileManager.addLanguageModels(this.getLlamafileModelDescriptions(llamafilesToAdd));
+        llamafilesToAdd.forEach(model => this._knownLlamaFiles.set(model.name, model));
+    }
+
+    protected handleRequestSettingsChange(newSettings: RequestSetting[]): void {
+        const llamafiles = Array.from(this._knownLlamaFiles.values());
+        const llamafileModelDescriptions = this.getLlamafileModelDescriptions(llamafiles);
+        llamafileModelDescriptions.forEach(llamafileModelDescription => {
+            this.llamafileManager.updateRequestSettings(llamafileModelDescription.name, llamafileModelDescription.defaultRequestSettings);
         });
     }
 }
+
+export interface LlamafileEntry {
+    name: string;
+    uri: string;
+    port: number;
+}
+
+namespace LlamafileEntry {
+    export function equals(a: LlamafileEntry, b: LlamafileEntry): boolean {
+        return (
+            a.name === b.name &&
+            a.uri === b.uri &&
+            a.port === b.port
+        );
+    }
+
+    export function is(entry: unknown): entry is LlamafileEntry {
+        return (
+            typeof entry === 'object' &&
+            // eslint-disable-next-line no-null/no-null
+            entry !== null &&
+            'name' in entry && typeof (entry as LlamafileEntry).name === 'string' &&
+            'uri' in entry && typeof (entry as LlamafileEntry).uri === 'string' &&
+            'port' in entry && typeof (entry as LlamafileEntry).port === 'number'
+        );
+    }
+}
diff --git a/packages/ai-llamafile/src/common/llamafile-language-model.ts b/packages/ai-llamafile/src/common/llamafile-language-model.ts
index 78cc443d37e4f..34968377045b8 100644
--- a/packages/ai-llamafile/src/common/llamafile-language-model.ts
+++ b/packages/ai-llamafile/src/common/llamafile-language-model.ts
@@ -21,14 +21,37 @@ export class LlamafileLanguageModel implements LanguageModel {
 
     readonly providerId = 'llamafile';
     readonly vendor: string = 'Mozilla';
 
-    constructor(readonly name: string, readonly uri: string, readonly port: number) {
-    }
+    /**
+     * @param name the unique name for this language model. It will be used to identify the model in the UI.
+     * @param uri the URI pointing to the Llamafile model location.
+     * @param port the port on which the Llamafile model server operates.
+     * @param defaultRequestSettings optional default settings for requests made using this model.
+     */
+    constructor(
+        public readonly name: string,
+        public readonly uri: string,
+        public readonly port: number,
+        public defaultRequestSettings?: { [key: string]: unknown }
+    ) { }
 
     get id(): string {
         return this.name;
     }
 
+    protected getSettings(request: LanguageModelRequest): Record<string, unknown> {
+        const settings = request.settings ? request.settings : this.defaultRequestSettings;
+        if (!settings) {
+            return {
+                n_predict: 200,
+                stream: true,
+                stop: ['</s>', 'Llama:', 'User:', '<|eot_id|>'],
+                cache_prompt: true,
+            };
+        }
+        return settings;
+    }
+
     async request(request: LanguageModelRequest, cancellationToken?: CancellationToken): Promise<LanguageModelResponse> {
+        const settings = this.getSettings(request);
         try {
             let prompt = request.messages.map(message => {
                 switch (message.actor) {
@@ -48,10 +71,7 @@ export class LlamafileLanguageModel implements LanguageModel {
                 },
                 body: JSON.stringify({
                     prompt: prompt,
-                    n_predict: 200,
-                    stream: true,
-                    stop: ['</s>', 'Llama:', 'User:', '<|eot_id|>'],
-                    cache_prompt: true,
+                    ...settings
                 }),
             });
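The same wholesale-replacement rule applies here: a custom entry for a llamafile overrides the built-in fallback as a whole, so it should restate everything it still needs, stop sequences included. A hypothetical entry for a llamafile named 'mistral-7b' (the entry's name doubles as the model ID; option names follow the llama.cpp-style /completion API the server exposes):

```ts
const llamafileSettings: RequestSetting = {
    modelId: 'mistral-7b',          // the llamafile's configured name
    providerId: 'llamafile',
    requestSettings: {
        n_predict: 512,             // raised from the built-in 200
        stream: true,
        stop: ['</s>', 'Llama:', 'User:', '<|eot_id|>'],
        cache_prompt: true
    }
};
```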
diff --git a/packages/ai-llamafile/src/common/llamafile-manager.ts b/packages/ai-llamafile/src/common/llamafile-manager.ts
index 561f3acdee95b..b6ffb786b0f44 100644
--- a/packages/ai-llamafile/src/common/llamafile-manager.ts
+++ b/packages/ai-llamafile/src/common/llamafile-manager.ts
@@ -17,34 +17,26 @@
 
 export const LlamafileManager = Symbol('LlamafileManager');
 
 export const LlamafileManagerPath = '/services/llamafilemanager';
 
+export interface LlamafileModelDescription {
+    name: string;
+    uri: string;
+    port: number;
+    /**
+     * Default request settings for the Llama model.
+     */
+    defaultRequestSettings?: { [key: string]: unknown };
+}
+
 export interface LlamafileManager {
     startServer(name: string): Promise<void>;
     stopServer(name: string): void;
     getStartedLlamafiles(): Promise<string[]>;
     setClient(client: LlamafileServerManagerClient): void;
-    addLanguageModels(llamaFiles: LlamafileEntry[]): Promise<void>;
+    addLanguageModels(llamaFiles: LlamafileModelDescription[]): Promise<void>;
     removeLanguageModels(modelIds: string[]): void;
+    updateRequestSettings(modelId: string, requestSettings?: { [key: string]: unknown }): void;
 }
 
 export interface LlamafileServerManagerClient {
     log(llamafileName: string, message: string): void;
     error(llamafileName: string, message: string): void;
 }
-
-export interface LlamafileEntry {
-    name: string;
-    uri: string;
-    port: number;
-}
-
-export namespace LlamafileEntry {
-    export function equals(a: LlamafileEntry, b: LlamafileEntry): boolean {
-        return a.name === b.name && a.uri === b.uri && a.port === b.port;
-    }
-    export function is(entry: unknown): entry is LlamafileEntry {
-        // eslint-disable-next-line no-null/no-null
-        return typeof entry === 'object' && entry !== null
-            && 'name' in entry && typeof entry.name === 'string'
-            && 'uri' in entry && typeof entry.uri === 'string'
-            && 'port' in entry && typeof entry.port === 'number';
-    }
-}
diff --git a/packages/ai-llamafile/src/node/llamafile-manager-impl.ts b/packages/ai-llamafile/src/node/llamafile-manager-impl.ts
index 3d726457f9faa..e5d5f601e97bc 100644
--- a/packages/ai-llamafile/src/node/llamafile-manager-impl.ts
+++ b/packages/ai-llamafile/src/node/llamafile-manager-impl.ts
@@ -19,7 +19,7 @@ import { ChildProcessWithoutNullStreams, spawn } from 'child_process';
 import { basename, dirname } from 'path';
 import { fileURLToPath } from 'url';
 import { LlamafileLanguageModel } from '../common/llamafile-language-model';
-import { LlamafileEntry, LlamafileManager, LlamafileServerManagerClient } from '../common/llamafile-manager';
+import { LlamafileManager, LlamafileModelDescription, LlamafileServerManagerClient } from '../common/llamafile-manager';
 
 @injectable()
 export class LlamafileManagerImpl implements LlamafileManager {
@@ -30,22 +30,42 @@ export class LlamafileManagerImpl implements LlamafileManager {
     private processMap: Map<string, ChildProcessWithoutNullStreams> = new Map();
     private client: LlamafileServerManagerClient;
 
-    async addLanguageModels(llamaFiles: LlamafileEntry[]): Promise<void> {
-        for (const llamafile of llamaFiles) {
+    async addLanguageModels(llamafileModelDescriptions: LlamafileModelDescription[]): Promise<void> {
+        for (const llamafile of llamafileModelDescriptions) {
             const model = await this.languageModelRegistry.getLanguageModel(llamafile.name);
             if (model) {
                 if (!(model instanceof LlamafileLanguageModel)) {
-                    console.warn(`Llamafile: model ${model.id} is not an LLamafile model`);
+                    console.warn(`Llamafile: model ${model.id} is not a Llamafile model`);
                     continue;
                 } else {
                     // This can happen during the initialization of more than one frontend; changes are handled in the frontend
                     console.info(`Llamafile: skip creating or updating model ${llamafile.name} because it already exists.`);
                 }
             } else {
-                this.languageModelRegistry.addLanguageModels([new LlamafileLanguageModel(llamafile.name, llamafile.uri, llamafile.port)]);
+                this.languageModelRegistry.addLanguageModels([
+                    new LlamafileLanguageModel(
+                        llamafile.name,
+                        llamafile.uri,
+                        llamafile.port,
+                        llamafile.defaultRequestSettings
+                    )
+                ]);
             }
         }
     }
+
+    async updateRequestSettings(modelId: string, requestSettings?: { [key: string]: unknown; }): Promise<void> {
+        const model = await this.languageModelRegistry.getLanguageModel(modelId);
+        if (model) {
+            if (!(model instanceof LlamafileLanguageModel)) {
+                console.warn(`Llamafile: model ${model.id} is not a Llamafile model`);
+                return;
+            } else {
+                model.defaultRequestSettings = requestSettings;
+            }
+        }
+    }
+
     removeLanguageModels(modelIds: string[]): void {
         modelIds.filter(modelId => this.isStarted(modelId)).forEach(modelId => this.stopServer(modelId));
         this.languageModelRegistry.removeLanguageModels(modelIds);
diff --git a/packages/ai-ollama/src/browser/ollama-frontend-application-contribution.ts b/packages/ai-ollama/src/browser/ollama-frontend-application-contribution.ts
index e08680563cd72..dc21ae55a3cfa 100644
--- a/packages/ai-ollama/src/browser/ollama-frontend-application-contribution.ts
+++ b/packages/ai-ollama/src/browser/ollama-frontend-application-contribution.ts
@@ -16,9 +16,11 @@
 
 import { FrontendApplicationContribution, PreferenceService } from '@theia/core/lib/browser';
 import { inject, injectable } from '@theia/core/shared/inversify';
-import { OllamaLanguageModelsManager } from '../common';
+import { OllamaLanguageModelsManager, OllamaModelDescription } from '../common';
 import { HOST_PREF, MODELS_PREF } from './ollama-preferences';
+import { PREFERENCE_NAME_REQUEST_SETTINGS, RequestSetting } from '@theia/ai-core/lib/browser/ai-core-preferences';
 
+const OLLAMA_PROVIDER_ID = 'ollama';
 @injectable()
 export class OllamaFrontendApplicationContribution implements FrontendApplicationContribution {
 
@@ -36,24 +38,54 @@ export class OllamaFrontendApplicationContribution implements FrontendApplicatio
             this.manager.setHost(host);
 
             const models = this.preferenceService.get<string[]>(MODELS_PREF, []);
-            this.manager.createLanguageModels(...models);
+            const requestSettings = this.preferenceService.get<RequestSetting[]>(PREFERENCE_NAME_REQUEST_SETTINGS, []);
+            this.manager.createOrUpdateLanguageModels(...models.map(modelId => this.createOllamaModelDescription(modelId, requestSettings)));
             this.prevModels = [...models];
 
             this.preferenceService.onPreferenceChanged(event => {
                 if (event.preferenceName === HOST_PREF) {
                     this.manager.setHost(event.newValue);
                 } else if (event.preferenceName === MODELS_PREF) {
-                    const oldModels = new Set(this.prevModels);
-                    const newModels = new Set(event.newValue as string[]);
-
-                    const modelsToRemove = [...oldModels].filter(model => !newModels.has(model));
-                    const modelsToAdd = [...newModels].filter(model => !oldModels.has(model));
-
-                    this.manager.removeLanguageModels(...modelsToRemove);
-                    this.manager.createLanguageModels(...modelsToAdd);
-                    this.prevModels = [...event.newValue];
+                    this.handleModelChanges(event.newValue as string[]);
+                } else if (event.preferenceName === PREFERENCE_NAME_REQUEST_SETTINGS) {
+                    this.handleRequestSettingsChange(event.newValue as RequestSetting[]);
                 }
             });
         });
     }
+
+    protected handleModelChanges(newModels: string[]): void {
+        const oldModels = new Set(this.prevModels);
+        const updatedModels = new Set(newModels);
+
+        const modelsToRemove = [...oldModels].filter(model => !updatedModels.has(model));
+        const modelsToAdd = [...updatedModels].filter(model => !oldModels.has(model));
+
+        this.manager.removeLanguageModels(...modelsToRemove);
+        const requestSettings = this.preferenceService.get<RequestSetting[]>(PREFERENCE_NAME_REQUEST_SETTINGS, []);
+        this.manager.createOrUpdateLanguageModels(...modelsToAdd.map(modelId => this.createOllamaModelDescription(modelId, requestSettings)));
+        this.prevModels = newModels;
+    }
+
+    protected handleRequestSettingsChange(newSettings: RequestSetting[]): void {
+        const models = this.preferenceService.get<string[]>(MODELS_PREF, []);
+        this.manager.createOrUpdateLanguageModels(...models.map(modelId => this.createOllamaModelDescription(modelId, newSettings)));
+    }
+
+    protected createOllamaModelDescription(modelId: string, requestSettings: RequestSetting[]): OllamaModelDescription {
+        const id = `${OLLAMA_PROVIDER_ID}/${modelId}`;
+        const matchingSettings = requestSettings.filter(
+            setting => (!setting.providerId || setting.providerId === OLLAMA_PROVIDER_ID) && setting.modelId === modelId
+        );
+        if (matchingSettings.length > 1) {
+            console.warn(`Multiple entries found for modelId "${modelId}". Using the first match and ignoring the rest.`);
+        }
+
+        const modelRequestSetting = matchingSettings[0];
+        return {
+            id: id,
+            model: modelId,
+            defaultRequestSettings: modelRequestSetting?.requestSettings
+        };
+    }
 }
diff --git a/packages/ai-ollama/src/common/ollama-language-models-manager.ts b/packages/ai-ollama/src/common/ollama-language-models-manager.ts
index 2714ef3a7757a..e37cc0f6a50c5 100644
--- a/packages/ai-ollama/src/common/ollama-language-models-manager.ts
+++ b/packages/ai-ollama/src/common/ollama-language-models-manager.ts
@@ -13,11 +13,28 @@
 //
 // SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-only WITH Classpath-exception-2.0
 // *****************************************************************************
+
 export const OLLAMA_LANGUAGE_MODELS_MANAGER_PATH = '/services/ollama/language-model-manager';
 
 export const OllamaLanguageModelsManager = Symbol('OllamaLanguageModelsManager');
 
+export interface OllamaModelDescription {
+    /**
+     * The identifier of the model which will be shown in the UI.
+     */
+    id: string;
+    /**
+     * The name or ID of the model in the Ollama environment.
+     */
+    model: string;
+    /**
+     * Default request settings for the Ollama model.
+     */
+    defaultRequestSettings?: { [key: string]: unknown };
+}
+
 export interface OllamaLanguageModelsManager {
     host: string | undefined;
     setHost(host: string | undefined): void;
-    createLanguageModels(...modelIds: string[]): Promise<void>;
-    removeLanguageModels(...modelIds: string[]): void
+    createOrUpdateLanguageModels(...models: OllamaModelDescription[]): Promise<void>;
+    removeLanguageModels(...modelIds: string[]): void;
 }
diff --git a/packages/ai-ollama/src/node/ollama-language-model.ts b/packages/ai-ollama/src/node/ollama-language-model.ts
index 899305831f7b5..8c90ac29dcf1b 100644
--- a/packages/ai-ollama/src/node/ollama-language-model.ts
+++ b/packages/ai-ollama/src/node/ollama-language-model.ts
@@ -24,7 +24,7 @@ import {
     ToolRequest
 } from '@theia/ai-core';
 import { CancellationToken } from '@theia/core';
-import { ChatRequest, ChatResponse, Message, Ollama, Tool } from 'ollama';
+import { ChatRequest, ChatResponse, Message, Ollama, Options, Tool } from 'ollama';
 
 export const OllamaModelIdentifier = Symbol('OllamaModelIdentifier');
 
@@ -37,30 +37,40 @@ export class OllamaModel implements LanguageModel {
     readonly providerId = 'ollama';
     readonly vendor: string = 'Ollama';
 
-    constructor(protected readonly model: string, protected host: () => string | undefined) {
-    }
-
-    get id(): string {
-        return this.providerId + '/' + this.model;
-    }
-
-    get name(): string {
-        return this.model;
+    /**
+     * @param id the unique id for this language model. It will be used to identify the model in the UI.
+     * @param model the unique model name as used in the Ollama environment.
+     * @param hostProvider a function to provide the host URL for the Ollama server.
+     * @param defaultRequestSettings optional default settings for requests made using this model.
+     */
+    constructor(
+        public readonly id: string,
+        protected readonly model: string,
+        protected host: () => string | undefined,
+        public defaultRequestSettings?: { [key: string]: unknown }
+    ) { }
+
+    protected getSettings(request: LanguageModelRequest): Partial<ChatRequest> {
+        const settings = request.settings ?? this.defaultRequestSettings ?? {};
+        return {
+            options: settings as Partial<Options>
+        };
     }
 
     async request(request: LanguageModelRequest, cancellationToken?: CancellationToken): Promise<LanguageModelResponse> {
+        const settings = this.getSettings(request);
         const ollama = this.initializeOllama();
 
         if (request.response_format?.type === 'json_schema') {
             return this.handleStructuredOutputRequest(ollama, request);
         }
         const response = await ollama.chat({
-            ...this.DEFAULT_REQUEST_SETTINGS,
             model: this.model,
+            ...this.DEFAULT_REQUEST_SETTINGS,
+            ...settings,
             messages: request.messages.map(this.toOllamaMessage),
             stream: true,
             tools: request.tools?.map(this.toOllamaTool),
-            ...request.settings
         });
 
         cancellationToken?.onCancellationRequested(() => {
@@ -77,12 +87,14 @@ export class OllamaModel implements LanguageModel {
     }
 
     protected async handleStructuredOutputRequest(ollama: Ollama, request: LanguageModelRequest): Promise<LanguageModelParsedResponse> {
+        const settings = this.getSettings(request);
         const result = await ollama.chat({
+            ...settings,
             ...this.DEFAULT_REQUEST_SETTINGS,
             model: this.model,
             messages: request.messages.map(this.toOllamaMessage),
             format: 'json',
-            ...request.settings
+            stream: false,
         });
         try {
             return {
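One provider-specific subtlety in this file: unlike the Hugging Face, Llamafile, and OpenAI models, which spread the settings into the top level of their request payloads, `getSettings` here nests them under the `options` field of the Ollama chat request. A sketch of where an entry ends up (values illustrative; `temperature` and `num_ctx` are standard Ollama runtime options):

```ts
const ollamaSettings: RequestSetting = {
    modelId: 'llama3',
    providerId: 'ollama',
    requestSettings: { temperature: 0.2, num_ctx: 4096 }
};
// Resulting call shape:
// ollama.chat({ model: 'llama3', options: { temperature: 0.2, num_ctx: 4096 }, ... })
```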
diff --git a/packages/ai-ollama/src/node/ollama-language-models-manager-impl.ts b/packages/ai-ollama/src/node/ollama-language-models-manager-impl.ts
index 1fbd1f520c3c8..db7bae2c9f640 100644
--- a/packages/ai-ollama/src/node/ollama-language-models-manager-impl.ts
+++ b/packages/ai-ollama/src/node/ollama-language-models-manager-impl.ts
@@ -17,7 +17,7 @@
 import { LanguageModelRegistry } from '@theia/ai-core';
 import { inject, injectable } from '@theia/core/shared/inversify';
 import { OllamaModel } from './ollama-language-model';
-import { OllamaLanguageModelsManager } from '../common';
+import { OllamaLanguageModelsManager, OllamaModelDescription } from '../common';
 
 @injectable()
 export class OllamaLanguageModelsManagerImpl implements OllamaLanguageModelsManager {
@@ -33,13 +33,26 @@ export class OllamaLanguageModelsManagerImpl implements OllamaLanguageModelsMana
 
     // Triggered from frontend. In case you want to use the models on the backend
     // without a frontend then call this yourself
-    async createLanguageModels(...modelIds: string[]): Promise<void> {
-        for (const id of modelIds) {
-            // TODO check that the model exists in Ollama using `list`. Ask and trigger download if not.
-            if (!(await this.languageModelRegistry.getLanguageModel(`ollama/${id}`))) {
-                this.languageModelRegistry.addLanguageModels([new OllamaModel(id, () => this.host)]);
+    async createOrUpdateLanguageModels(...models: OllamaModelDescription[]): Promise<void> {
+        for (const modelDescription of models) {
+            const existingModel = await this.languageModelRegistry.getLanguageModel(modelDescription.id);
+            const hostProvider = () => this.host;
+
+            if (existingModel) {
+                if (!(existingModel instanceof OllamaModel)) {
+                    console.warn(`Ollama: model ${modelDescription.id} is not an Ollama model`);
+                    continue;
+                }
+                existingModel.defaultRequestSettings = modelDescription.defaultRequestSettings;
             } else {
-                console.info(`Ollama: skip creating model ${id} because it already exists`);
+                this.languageModelRegistry.addLanguageModels([
+                    new OllamaModel(
+                        modelDescription.id,
+                        modelDescription.model,
+                        hostProvider,
+                        modelDescription.defaultRequestSettings
+                    )
+                ]);
             }
         }
     }
@@ -49,10 +62,6 @@ export class OllamaLanguageModelsManagerImpl implements OllamaLanguageModelsMana
     }
 
     setHost(host: string | undefined): void {
-        if (host) {
-            this._host = host;
-        } else {
-            this._host = undefined;
-        }
+        this._host = host || undefined;
     }
 }
diff --git a/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts b/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts
index 3f917c77ed1ee..689278ebfcc45 100644
--- a/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts
+++ b/packages/ai-openai/src/browser/openai-frontend-application-contribution.ts
@@ -18,6 +18,9 @@ import { FrontendApplicationContribution, PreferenceService } from '@theia/core/
 import { inject, injectable } from '@theia/core/shared/inversify';
 import { OpenAiLanguageModelsManager, OpenAiModelDescription } from '../common';
 import { API_KEY_PREF, CUSTOM_ENDPOINTS_PREF, MODELS_PREF } from './openai-preferences';
+import { PREFERENCE_NAME_REQUEST_SETTINGS, RequestSetting } from '@theia/ai-core/lib/browser/ai-core-preferences';
+
+const OPENAI_PROVIDER_ID = 'openai';
 
 @injectable()
 export class OpenAiFrontendApplicationContribution implements FrontendApplicationContribution {
@@ -28,7 +31,6 @@ export class OpenAiFrontendApplicationContribution implements FrontendApplicatio
     @inject(OpenAiLanguageModelsManager)
     protected manager: OpenAiLanguageModelsManager;
 
-    // The preferenceChange.oldValue is always undefined for some reason
     protected prevModels: string[] = [];
     protected prevCustomModels: Partial<OpenAiModelDescription>[] = [];
 
@@ -38,72 +40,123 @@ export class OpenAiFrontendApplicationContribution implements FrontendApplicatio
             this.manager.setApiKey(apiKey);
 
             const models = this.preferenceService.get<string[]>(MODELS_PREF, []);
-            this.manager.createOrUpdateLanguageModels(...models.map(createOpenAIModelDescription));
+            const requestSettings = this.getRequestSettingsPref();
+            this.manager.createOrUpdateLanguageModels(...models.map(modelId => this.createOpenAIModelDescription(modelId, requestSettings)));
             this.prevModels = [...models];
 
             const customModels = this.preferenceService.get<Partial<OpenAiModelDescription>[]>(CUSTOM_ENDPOINTS_PREF, []);
-            this.manager.createOrUpdateLanguageModels(...createCustomModelDescriptionsFromPreferences(customModels));
+            this.manager.createOrUpdateLanguageModels(...this.createCustomModelDescriptionsFromPreferences(customModels, this.getRequestSettingsPref()));
             this.prevCustomModels = [...customModels];
 
             this.preferenceService.onPreferenceChanged(event => {
                 if (event.preferenceName === API_KEY_PREF) {
                     this.manager.setApiKey(event.newValue);
                 } else if (event.preferenceName === MODELS_PREF) {
-                    const oldModels = new Set(this.prevModels);
-                    const newModels = new Set(event.newValue as string[]);
-
-                    const modelsToRemove = [...oldModels].filter(model => !newModels.has(model));
-                    const modelsToAdd = [...newModels].filter(model => !oldModels.has(model));
-
-                    this.manager.removeLanguageModels(...modelsToRemove.map(model => `openai/${model}`));
-                    this.manager.createOrUpdateLanguageModels(...modelsToAdd.map(createOpenAIModelDescription));
-                    this.prevModels = [...event.newValue];
+                    this.handleModelChanges(event.newValue as string[]);
                 } else if (event.preferenceName === CUSTOM_ENDPOINTS_PREF) {
-                    const oldModels = createCustomModelDescriptionsFromPreferences(this.prevCustomModels);
-                    const newModels = createCustomModelDescriptionsFromPreferences(event.newValue);
-
-                    const modelsToRemove = oldModels.filter(model => !newModels.some(newModel => newModel.id === model.id));
-                    const modelsToAddOrUpdate = newModels.filter(newModel =>
-                        !oldModels.some(model =>
-                            model.id === newModel.id &&
-                            model.model === newModel.model &&
-                            model.url === newModel.url &&
-                            model.apiKey === newModel.apiKey &&
-                            model.enableStreaming === newModel.enableStreaming));
-
-                    this.manager.removeLanguageModels(...modelsToRemove.map(model => model.id));
-                    this.manager.createOrUpdateLanguageModels(...modelsToAddOrUpdate);
+                    this.handleCustomModelChanges(event.newValue as Partial<OpenAiModelDescription>[]);
+                } else if (event.preferenceName === PREFERENCE_NAME_REQUEST_SETTINGS) {
+                    this.handleRequestSettingsChanges(event.newValue as RequestSetting[]);
                 }
             });
         });
     }
-}
 
-const openAIModelsWithDisabledStreaming = ['o1-preview'];
+    protected handleModelChanges(newModels: string[]): void {
+        const oldModels = new Set(this.prevModels);
+        const updatedModels = new Set(newModels);
 
-function createOpenAIModelDescription(modelId: string): OpenAiModelDescription {
-    return {
-        id: `openai/${modelId}`,
-        model: modelId,
-        apiKey: true,
-        enableStreaming: !openAIModelsWithDisabledStreaming.includes(modelId)
-    };
-}
+        const modelsToRemove = [...oldModels].filter(model => !updatedModels.has(model));
+        const modelsToAdd = [...updatedModels].filter(model => !oldModels.has(model));
 
-function createCustomModelDescriptionsFromPreferences(preferences: Partial<OpenAiModelDescription>[]): OpenAiModelDescription[] {
-    return preferences.reduce<OpenAiModelDescription[]>((acc, pref) => {
-        if (!pref.model || !pref.url || typeof pref.model !== 'string' || typeof pref.url !== 'string') {
-            return acc;
-        }
-        return [
-            ...acc,
-            {
-                id: pref.id && typeof pref.id === 'string' ? pref.id : pref.model,
-                model: pref.model,
-                url: pref.url,
-                apiKey: typeof pref.apiKey === 'string' || pref.apiKey === true ? pref.apiKey : undefined,
-                enableStreaming: pref.enableStreaming ?? true
+        this.manager.removeLanguageModels(...modelsToRemove.map(model => `openai/${model}`));
+        const requestSettings = this.getRequestSettingsPref();
+        this.manager.createOrUpdateLanguageModels(...modelsToAdd.map(modelId => this.createOpenAIModelDescription(modelId, requestSettings)));
+        this.prevModels = newModels;
+    }
+
+    private getRequestSettingsPref(): RequestSetting[] {
+        return this.preferenceService.get<RequestSetting[]>(PREFERENCE_NAME_REQUEST_SETTINGS, []);
+    }
+
+    protected handleCustomModelChanges(newCustomModels: Partial<OpenAiModelDescription>[]): void {
+        const requestSettings = this.getRequestSettingsPref();
+        const oldModels = this.createCustomModelDescriptionsFromPreferences(this.prevCustomModels, requestSettings);
+        const newModels = this.createCustomModelDescriptionsFromPreferences(newCustomModels, requestSettings);
+
+        const modelsToRemove = oldModels.filter(model => !newModels.some(newModel => newModel.id === model.id));
+        const modelsToAddOrUpdate = newModels.filter(newModel =>
+            !oldModels.some(model =>
+                model.id === newModel.id &&
+                model.model === newModel.model &&
+                model.url === newModel.url &&
+                model.apiKey === newModel.apiKey &&
+                model.enableStreaming === newModel.enableStreaming));
+
+        this.manager.removeLanguageModels(...modelsToRemove.map(model => model.id));
+        this.manager.createOrUpdateLanguageModels(...modelsToAddOrUpdate);
+        this.prevCustomModels = [...newCustomModels];
+    }
+
+    protected handleRequestSettingsChanges(newSettings: RequestSetting[]): void {
+        const models = this.preferenceService.get<string[]>(MODELS_PREF, []);
+        this.manager.createOrUpdateLanguageModels(...models.map(modelId => this.createOpenAIModelDescription(modelId, newSettings)));
+
+        const customModels = this.preferenceService.get<Partial<OpenAiModelDescription>[]>(CUSTOM_ENDPOINTS_PREF, []);
+        this.manager.createOrUpdateLanguageModels(...this.createCustomModelDescriptionsFromPreferences(customModels, newSettings));
+    }
+
+    protected createOpenAIModelDescription(modelId: string, requestSettings: RequestSetting[]): OpenAiModelDescription {
+        const id = `${OPENAI_PROVIDER_ID}/${modelId}`;
+        const modelRequestSetting = this.getMatchingRequestSetting(modelId, OPENAI_PROVIDER_ID, requestSettings);
+        return {
+            id: id,
+            model: modelId,
+            apiKey: true,
+            enableStreaming: !openAIModelsWithDisabledStreaming.includes(modelId),
+            defaultRequestSettings: modelRequestSetting?.requestSettings
+        };
+    }
+
+    protected createCustomModelDescriptionsFromPreferences(
+        preferences: Partial<OpenAiModelDescription>[],
+        requestSettings: RequestSetting[]
+    ): OpenAiModelDescription[] {
+        return preferences.reduce<OpenAiModelDescription[]>((acc, pref) => {
+            if (!pref.model || !pref.url || typeof pref.model !== 'string' || typeof pref.url !== 'string') {
+                return acc;
             }
-        ];
-    }, []);
+
+            const modelRequestSetting = this.getMatchingRequestSetting(pref.model, OPENAI_PROVIDER_ID, requestSettings);
+
+            return [
+                ...acc,
+                {
+                    id: pref.id && typeof pref.id === 'string' ? pref.id : pref.model,
+                    model: pref.model,
+                    url: pref.url,
+                    apiKey: typeof pref.apiKey === 'string' || pref.apiKey === true ? pref.apiKey : undefined,
+                    enableStreaming: pref.enableStreaming ?? true,
+                    defaultRequestSettings: modelRequestSetting?.requestSettings
+                }
+            ];
+        }, []);
+    }
+
+    protected getMatchingRequestSetting(
+        modelId: string,
+        providerId: string,
+        requestSettings: RequestSetting[]
+    ): RequestSetting | undefined {
+        const matchingSettings = requestSettings.filter(
+            setting => (!setting.providerId || setting.providerId === providerId) && setting.modelId === modelId
+        );
+        if (matchingSettings.length > 1) {
+            console.warn(
+                `Multiple entries found for provider "${providerId}" and model "${modelId}". Using the first match.`
+            );
+        }
+        return matchingSettings[0];
+    }
 }
+
+const openAIModelsWithDisabledStreaming = ['o1-preview'];
diff --git a/packages/ai-openai/src/common/openai-language-models-manager.ts b/packages/ai-openai/src/common/openai-language-models-manager.ts
index 4ce75b3779464..368c3d0875fa2 100644
--- a/packages/ai-openai/src/common/openai-language-models-manager.ts
+++ b/packages/ai-openai/src/common/openai-language-models-manager.ts
@@ -36,6 +36,10 @@ export interface OpenAiModelDescription {
      * Indicate whether the streaming API shall be used.
      */
     enableStreaming: boolean;
+    /**
+     * Default request settings for the OpenAI model.
+     */
+    defaultRequestSettings?: { [key: string]: unknown };
 }
 export interface OpenAiLanguageModelsManager {
     apiKey: string | undefined;
diff --git a/packages/ai-openai/src/node/openai-language-model.ts b/packages/ai-openai/src/node/openai-language-model.ts
index 289fef89ebaaa..d43e3f4fd97c0 100644
--- a/packages/ai-openai/src/node/openai-language-model.ts
+++ b/packages/ai-openai/src/node/openai-language-model.ts
@@ -57,10 +57,27 @@ export class OpenAiModel implements LanguageModel {
      * @param enableStreaming whether the streaming API shall be used
      * @param apiKey a function that returns the API key to use for this model, called on each request
      * @param url the OpenAI API compatible endpoint where the model is hosted. If not provided the default OpenAI endpoint will be used.
+     * @param defaultRequestSettings optional default settings for requests made using this model.
      */
-    constructor(public readonly id: string, public model: string, public enableStreaming: boolean, public apiKey: () => string | undefined, public url: string | undefined) { }
+    constructor(
+        public readonly id: string,
+        public model: string,
+        public enableStreaming: boolean,
+        public apiKey: () => string | undefined,
+        public url: string | undefined,
+        public defaultRequestSettings?: { [key: string]: unknown }
+    ) { }
+
+    protected getSettings(request: LanguageModelRequest): Record<string, unknown> {
+        const settings = request.settings ? request.settings : this.defaultRequestSettings;
+        if (!settings) {
+            return {};
+        }
+        return settings;
+    }
 
     async request(request: LanguageModelRequest, cancellationToken?: CancellationToken): Promise<LanguageModelResponse> {
+        const settings = this.getSettings(request);
         const openai = this.initializeOpenAi();
 
         if (this.isNonStreamingModel(this.model)) {
@@ -80,14 +97,14 @@ export class OpenAiModel implements LanguageModel {
                 stream: true,
                 tools: tools,
                 tool_choice: 'auto',
-                ...request.settings
+                ...settings
             });
         } else {
             runner = openai.beta.chat.completions.stream({
                 model: this.model,
                 messages: request.messages.map(toOpenAIMessage),
                 stream: true,
-                ...request.settings
+                ...settings
             });
         }
         cancellationToken?.onCancellationRequested(() => {
@@ -141,10 +158,11 @@ export class OpenAiModel implements LanguageModel {
     }
 
     protected async handleNonStreamingRequest(openai: OpenAI, request: LanguageModelRequest): Promise<LanguageModelResponse> {
+        const settings = this.getSettings(request);
         const response = await openai.chat.completions.create({
             model: this.model,
             messages: request.messages.map(toOpenAIMessage),
-            ...request.settings
+            ...settings
         });
 
         const message = response.choices[0].message;
@@ -168,12 +186,13 @@ export class OpenAiModel implements LanguageModel {
     }
 
     protected async handleStructuredOutputRequest(openai: OpenAI, request: LanguageModelRequest): Promise<LanguageModelParsedResponse> {
+        const settings = this.getSettings(request);
         // TODO implement tool support for structured output (parse() seems to require different tool format)
         const result = await openai.beta.chat.completions.parse({
             model: this.model,
             messages: request.messages.map(toOpenAIMessage),
             response_format: request.response_format,
-            ...request.settings
+            ...settings
         });
         const message = result.choices[0].message;
         if (message.refusal || message.parsed === undefined) {
diff --git a/packages/ai-openai/src/node/openai-language-models-manager-impl.ts b/packages/ai-openai/src/node/openai-language-models-manager-impl.ts
index 4ccf77b0cc9f9..90d3c65e1fec6 100644
--- a/packages/ai-openai/src/node/openai-language-models-manager-impl.ts
+++ b/packages/ai-openai/src/node/openai-language-models-manager-impl.ts
@@ -45,23 +45,27 @@ export class OpenAiLanguageModelsManagerImpl implements OpenAiLanguageModelsMana
             }
             return undefined;
         };
+
         if (model) {
             if (!(model instanceof OpenAiModel)) {
-                console.warn(`Open AI: model ${modelDescription.id} is not an OpenAI model`);
-                continue;
-            }
-            if (!modelDescription.url) {
-                // This seems to be an official model, but it was already created. This can happen during the initializing of more than one frontend.
-                console.info(`Open AI: skip creating model ${modelDescription.id} because it already exists`);
+                console.warn(`OpenAI: model ${modelDescription.id} is not an OpenAI model`);
                 continue;
             }
-            model.url = modelDescription.url;
             model.model = modelDescription.model;
             model.enableStreaming = modelDescription.enableStreaming;
+            model.url = modelDescription.url;
             model.apiKey = apiKeyProvider;
+            model.defaultRequestSettings = modelDescription.defaultRequestSettings;
         } else {
             this.languageModelRegistry.addLanguageModels([
-                new OpenAiModel(modelDescription.id, modelDescription.model, modelDescription.enableStreaming, apiKeyProvider, modelDescription.url)
+                new OpenAiModel(
+                    modelDescription.id,
+                    modelDescription.model,
+                    modelDescription.enableStreaming,
+                    apiKeyProvider,
+                    modelDescription.url,
+                    modelDescription.defaultRequestSettings
+                )
             ]);
         }
    }
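Across all four providers the resolution rule is identical: an entry matches when its `modelId` equals the model's ID and its `providerId` is either absent or equal to the provider's ID; when several entries match, the first wins and a warning is logged. An equivalent condensed sketch (standalone, not part of the diff):

```ts
function resolveRequestSetting(
    modelId: string,
    providerId: string,
    requestSettings: RequestSetting[]
): RequestSetting | undefined {
    // First match wins, mirroring the filter()[0] pattern used above.
    return requestSettings.find(
        setting => (!setting.providerId || setting.providerId === providerId) && setting.modelId === modelId
    );
}
```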