From 5905b7cacbaca86d694ffe29d6c9669d37cdbf87 Mon Sep 17 00:00:00 2001 From: jhweir Date: Wed, 18 Dec 2024 13:37:37 +0000 Subject: [PATCH 1/6] Prevent LLM model being added if user selects "None" at AI options --- ui/src/components/Login.tsx | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/ui/src/components/Login.tsx b/ui/src/components/Login.tsx index dc515da25..fa716a938 100644 --- a/ui/src/components/Login.tsx +++ b/ui/src/components/Login.tsx @@ -92,19 +92,19 @@ const Login = () => { async function saveModels() { if (await apiValid()) { // add llm model - const llm = { name: "LLM Model 1", modelType: "LLM" } as ModelInput; - if (aiMode === "Local") { - llm.local = { - fileName: "solar_10_7b_instruct", - tokenizerSource: "", - modelParameters: "", - }; - } else { - llm.api = { baseUrl: apiUrl, apiKey, apiType: "OPEN_AI" }; + if (aiMode !== "None") { + const llm = { name: "LLM Model 1", modelType: "LLM" } as ModelInput; + if (aiMode === "Local") { + llm.local = { + fileName: "solar_10_7b_instruct", + tokenizerSource: "", + modelParameters: "", + }; + } else { + llm.api = { baseUrl: apiUrl, apiKey, apiType: "OPEN_AI" }; + } + client!.ai.addModel(llm).then((modelId) => client!.ai.setDefaultModel("LLM", modelId)); } - client!.ai - .addModel(llm) - .then((modelId) => client!.ai.setDefaultModel("LLM", modelId)); // add embedding model client!.ai.addModel({ name: "bert", From b96bd5732667cd797367041067676ca41bcf4e27 Mon Sep 17 00:00:00 2001 From: Nicolas Luck Date: Wed, 18 Dec 2024 15:15:03 +0100 Subject: [PATCH 2/6] Fix AI tab in launcher when there is no default LLM --- ui/src/components/AI.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/src/components/AI.tsx b/ui/src/components/AI.tsx index 11378607c..1ec44da0e 100644 --- a/ui/src/components/AI.tsx +++ b/ui/src/components/AI.tsx @@ -22,7 +22,7 @@ const AI = () => { // attach tasks to models const modelsWithTasks = modelsInDB.map((model) => { const modelWithTasks = { ...model } as any; - if (model.id === defaultLLM.id) { + if (model.id === defaultLLM?.id) { modelWithTasks.default = true; // find tasks for default model const matchingTasks = tasksInDB.filter( From a2dbd9d598d60e61af53310a5030464befb698aa Mon Sep 17 00:00:00 2001 From: Nicolas Luck Date: Wed, 18 Dec 2024 16:45:31 +0100 Subject: [PATCH 3/6] AI setup wizard wording and more information about models used --- ui/src/components/Login.tsx | 67 ++++++++++++++++++++++++++++++++----- 1 file changed, 59 insertions(+), 8 deletions(-) diff --git a/ui/src/components/Login.tsx b/ui/src/components/Login.tsx index fa716a938..e701da9e4 100644 --- a/ui/src/components/Login.tsx +++ b/ui/src/components/Login.tsx @@ -4,6 +4,7 @@ import { useContext, useEffect, useState } from "react"; import { useNavigate } from "react-router-dom"; import { Ad4minContext } from "../context/Ad4minContext"; import { AgentContext } from "../context/AgentContext"; +import { open } from "@tauri-apps/plugin-shell"; import "../index.css"; import Logo from "./Logo"; @@ -415,13 +416,33 @@ const Login = () => { style={{ textAlign: "center", width: "100%", - maxWidth: 500, + maxWidth: 570, marginBottom: 40, }} > - - ADAM allows you to control the AI used for transcription, vector - embedding, and LLM tasks. + + + Is your computer capabale of running Large Language Models locally? 
+ + + Regardless of your choice here, we will always download and use small AI models + (such as open("https://huggingface.co/openai/whisper-small")} + style={{cursor: "pointer"}} + >Whisper small and an open("https://huggingface.co/Snowflake/snowflake-arctic-embed-xs")} + style={{cursor: "pointer"}} + >Embedding model) + to handle basic tasks on all devices. +

+

+ When it comes to LLMs, it depends on whether you have an Apple Silicon Mac (M1 or better) + or an NVIDIA GPU.

+

+ Alternatively, you can configure ADAM to outsource LLM tasks to a remote API. + If you're unsure, you can select "None" now and add, remove, or change model settings + later on in the AI tab.
@@ -440,8 +461,8 @@ const Login = () => { Local
- Select Local if your device is capable or running large - models locally. + Select Local if you have an M1 mac (or better) + or an nVidia GPU @@ -459,7 +480,7 @@ const Login = () => { Remote - Select Remote to use an external API like OpenAI. + Select to use an external API like OpenAI or your own Ollama server. @@ -477,11 +498,27 @@ const Login = () => { None - Select None if you'd prefer not use AI. + Select if you'd prefer NOT to use LLMs at all. + {aiMode === "Local" && ( + + + This will download open("https://huggingface.co/TheBloke/SOLAR-10.7B-Instruct-v1.0-GGUF")} + style={{cursor: "pointer"}} + >SOLAR 10.7b instruct + + + )} + {aiMode === "Remote" && ( { )} + {aiMode === "None" && ( + + + Selecting None here and not having any LLM configured + might result in new Synergy features not working in Flux... + + + )} + setCurrentIndex(4)}> Previous From b6328cc1acca0b0dbd642c627c673b1ae40ab9f4 Mon Sep 17 00:00:00 2001 From: jhweir Date: Thu, 19 Dec 2024 09:59:56 +0000 Subject: [PATCH 4/6] Automatically set new LLM model as default when saved --- ui/src/components/ModelModal.tsx | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/ui/src/components/ModelModal.tsx b/ui/src/components/ModelModal.tsx index edf7cb3b8..ef002d1f5 100644 --- a/ui/src/components/ModelModal.tsx +++ b/ui/src/components/ModelModal.tsx @@ -27,10 +27,7 @@ const llmModels = [ const transcriptionModels = ["whisper"]; const embeddingModels = ["bert"]; -export default function ModelModal(props: { - close: () => void; - oldModel?: any; -}) { +export default function ModelModal(props: { close: () => void; oldModel?: any }) { const { close, oldModel } = props; const { state: { client }, @@ -108,7 +105,7 @@ export default function ModelModal(props: { }; } if (oldModel) client!.ai.updateModel(oldModel.id, model); - else client!.ai.addModel(model); + else client!.ai.addModel(model).then((modelId) => client!.ai.setDefaultModel("LLM", modelId)); close(); } } From f2638f8fc04d26de875aaccbb394af860dff2853 Mon Sep 17 00:00:00 2001 From: jhweir Date: Thu, 19 Dec 2024 14:05:43 +0000 Subject: [PATCH 5/6] Only set new LLM model as default if no default previously set --- ui/src/components/ModelModal.tsx | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ui/src/components/ModelModal.tsx b/ui/src/components/ModelModal.tsx index ef002d1f5..6c8a00134 100644 --- a/ui/src/components/ModelModal.tsx +++ b/ui/src/components/ModelModal.tsx @@ -105,7 +105,13 @@ export default function ModelModal(props: { close: () => void; oldModel?: any }) }; } if (oldModel) client!.ai.updateModel(oldModel.id, model); - else client!.ai.addModel(model).then((modelId) => client!.ai.setDefaultModel("LLM", modelId)); + else { + client!.ai.addModel(model).then(async (newModelId) => { + // if no default LLM set, mark new model as default + const defaultLLM = await client!.ai.getDefaultModel("LLM"); + if (!defaultLLM) client!.ai.setDefaultModel("LLM", newModelId); + }); + } close(); } } From 7d1ed3c96b74150339896968cd79b36b5e3d7ce9 Mon Sep 17 00:00:00 2001 From: jhweir Date: Thu, 19 Dec 2024 14:41:26 +0000 Subject: [PATCH 6/6] Add model async await syntax change --- ui/src/components/ModelModal.tsx | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ui/src/components/ModelModal.tsx b/ui/src/components/ModelModal.tsx index 6c8a00134..08c8957ad 100644 --- a/ui/src/components/ModelModal.tsx +++ b/ui/src/components/ModelModal.tsx @@ -106,11 +106,10 @@ export default function ModelModal(props: { close: () => 
void; oldModel?: any }) } if (oldModel) client!.ai.updateModel(oldModel.id, model); else { - client!.ai.addModel(model).then(async (newModelId) => { - // if no default LLM set, mark new model as default - const defaultLLM = await client!.ai.getDefaultModel("LLM"); - if (!defaultLLM) client!.ai.setDefaultModel("LLM", newModelId); - }); + const newModelId = await client!.ai.addModel(model); + // if no default LLM set, mark new model as default + const defaultLLM = await client!.ai.getDefaultModel("LLM"); + if (!defaultLLM) client!.ai.setDefaultModel("LLM", newModelId); } close(); }
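
Note (not part of the diffs above): taken together, patch 1 and patches 5-6 define when an LLM model is added and when it becomes the default. The standalone TypeScript sketch below restates that combined logic for reference. The AIClient interface and ModelInput shape here are assumptions that mirror only the calls visible in the patches (addModel, getDefaultModel, setDefaultModel); they are not the real client types, and the null return of getDefaultModel when no default is set is likewise an assumption based on the `if (!defaultLLM)` check in patch 6.

// Hedged sketch of the behaviour introduced in this patch series.
// Types below are minimal stand-ins, not the actual ad4m client definitions.

interface ModelInput {
  name: string;
  modelType: string; // the patches use "LLM" for the language model
  local?: { fileName: string; tokenizerSource: string; modelParameters: string };
  api?: { baseUrl: string; apiKey: string; apiType: "OPEN_AI" };
}

interface AIClient {
  addModel(model: ModelInput): Promise<string>;
  getDefaultModel(modelType: "LLM"): Promise<{ id: string } | null>;
  setDefaultModel(modelType: "LLM", modelId: string): Promise<void>;
}

type AiMode = "Local" | "Remote" | "None";

// Patches 5-6 (ModelModal): a newly added model only becomes the default LLM
// when no default has been set yet.
export async function addModelAndMaybeSetDefault(ai: AIClient, model: ModelInput): Promise<string> {
  const newModelId = await ai.addModel(model);
  const defaultLLM = await ai.getDefaultModel("LLM");
  if (!defaultLLM) await ai.setDefaultModel("LLM", newModelId);
  return newModelId;
}

// Patch 1 (Login wizard): no LLM is added at all when the user selects "None";
// otherwise the model is configured either for local inference or a remote API.
export async function addLlmForMode(
  ai: AIClient,
  aiMode: AiMode,
  apiUrl: string,
  apiKey: string
): Promise<string | null> {
  if (aiMode === "None") return null;

  const llm: ModelInput = { name: "LLM Model 1", modelType: "LLM" };
  if (aiMode === "Local") {
    llm.local = { fileName: "solar_10_7b_instruct", tokenizerSource: "", modelParameters: "" };
  } else {
    llm.api = { baseUrl: apiUrl, apiKey, apiType: "OPEN_AI" };
  }
  return addModelAndMaybeSetDefault(ai, llm);
}

One asymmetry worth noting: after these patches, the login wizard (patch 1) still marks its newly added LLM as the default unconditionally, while ModelModal (patches 5-6) only claims the default slot when no default exists yet.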