diff --git a/ui/src/components/AI.tsx b/ui/src/components/AI.tsx
index 11378607c..1ec44da0e 100644
--- a/ui/src/components/AI.tsx
+++ b/ui/src/components/AI.tsx
@@ -22,7 +22,7 @@ const AI = () => {
// attach tasks to models
const modelsWithTasks = modelsInDB.map((model) => {
const modelWithTasks = { ...model } as any;
- if (model.id === defaultLLM.id) {
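+ // defaultLLM can be null when no default model has been set (e.g. AI mode "None"), so guard with optional chaining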
+ if (model.id === defaultLLM?.id) {
modelWithTasks.default = true;
// find tasks for default model
const matchingTasks = tasksInDB.filter(
diff --git a/ui/src/components/Login.tsx b/ui/src/components/Login.tsx
index dc515da25..e701da9e4 100644
--- a/ui/src/components/Login.tsx
+++ b/ui/src/components/Login.tsx
@@ -4,6 +4,7 @@ import { useContext, useEffect, useState } from "react";
import { useNavigate } from "react-router-dom";
import { Ad4minContext } from "../context/Ad4minContext";
import { AgentContext } from "../context/AgentContext";
+import { open } from "@tauri-apps/plugin-shell";
import "../index.css";
import Logo from "./Logo";
@@ -92,19 +93,19 @@ const Login = () => {
async function saveModels() {
if (await apiValid()) {
// add llm model
- const llm = { name: "LLM Model 1", modelType: "LLM" } as ModelInput;
- if (aiMode === "Local") {
- llm.local = {
- fileName: "solar_10_7b_instruct",
- tokenizerSource: "",
- modelParameters: "",
- };
- } else {
- llm.api = { baseUrl: apiUrl, apiKey, apiType: "OPEN_AI" };
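+ // only create and register an LLM model when the user picked Local or Remote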
+ if (aiMode !== "None") {
+ const llm = { name: "LLM Model 1", modelType: "LLM" } as ModelInput;
+ if (aiMode === "Local") {
+ llm.local = {
+ fileName: "solar_10_7b_instruct",
+ tokenizerSource: "",
+ modelParameters: "",
+ };
+ } else {
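+ // Remote mode targets any OpenAI-compatible endpoint (e.g. OpenAI itself or a local Ollama server)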
+ llm.api = { baseUrl: apiUrl, apiKey, apiType: "OPEN_AI" };
+ }
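+ // register the model, then mark it as the default LLM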
+ client!.ai.addModel(llm).then((modelId) => client!.ai.setDefaultModel("LLM", modelId));
}
- client!.ai
- .addModel(llm)
- .then((modelId) => client!.ai.setDefaultModel("LLM", modelId));
// add embedding model
client!.ai.addModel({
name: "bert",
@@ -415,13 +416,33 @@ const Login = () => {
style={{
textAlign: "center",
width: "100%",
- maxWidth: 500,
+ maxWidth: 570,
marginBottom: 40,
}}
>
-
- ADAM allows you to control the AI used for transcription, vector
- embedding, and LLM tasks.
+ <j-text>
+ Is your computer capable of running Large Language Models locally?
+ </j-text>
+ <j-text>
+ Regardless of your choice here, we will always download and use small AI models
+ (such as <span onClick={() => open("https://huggingface.co/openai/whisper-small")}
+ style={{cursor: "pointer"}}
+ >Whisper small</span> and an <span onClick={() => open("https://huggingface.co/Snowflake/snowflake-arctic-embed-xs")}
+ style={{cursor: "pointer"}}
+ >Embedding model</span>)
+ to handle basic tasks on all devices.
+ </j-text>
+ <j-text>
+ When it comes to LLMs, running them locally requires either an Apple Silicon Mac (M1 or better)
+ or an NVIDIA GPU.
+ </j-text>
+ <j-text>
+ Alternatively, you can configure ADAM to outsource LLM tasks to a remote API.
+ If you're unsure, you can select "None" now and add, remove, or change model settings
+ later on in the AI tab.
+ </j-text>
@@ -440,8 +461,8 @@ const Login = () => {
Local
- Select Local if your device is capable or running large
- models locally.
+ Select Local if you have an M1 Mac (or better)
+ or an NVIDIA GPU.
@@ -459,7 +480,7 @@ const Login = () => {
Remote
- Select Remote to use an external API like OpenAI.
+ Select Remote to use an external API like OpenAI or your own Ollama server.
@@ -477,11 +498,27 @@ const Login = () => {
None
- Select None if you'd prefer not use AI.
+ Select None if you'd prefer not to use LLMs at all.
+ {aiMode === "Local" && (
+ <j-box>
+ <j-text>
+ This will download <span onClick={() => open("https://huggingface.co/TheBloke/SOLAR-10.7B-Instruct-v1.0-GGUF")}
+ style={{cursor: "pointer"}}
+ >SOLAR 10.7b instruct</span>
+ </j-text>
+ </j-box>
+ )}
+
{aiMode === "Remote" && (
{
)}
+ {aiMode === "None" && (
+ <j-box>
+ <j-text>
+ Selecting None here and not having any LLM configured
+ might result in new Synergy features not working in Flux...
+ </j-text>
+ </j-box>
+ )}
+
<j-button onClick={() => setCurrentIndex(4)}>
Previous
diff --git a/ui/src/components/ModelModal.tsx b/ui/src/components/ModelModal.tsx
index edf7cb3b8..08c8957ad 100644
--- a/ui/src/components/ModelModal.tsx
+++ b/ui/src/components/ModelModal.tsx
@@ -27,10 +27,7 @@ const llmModels = [
const transcriptionModels = ["whisper"];
const embeddingModels = ["bert"];
-export default function ModelModal(props: {
- close: () => void;
- oldModel?: any;
-}) {
+export default function ModelModal(props: { close: () => void; oldModel?: any }) {
const { close, oldModel } = props;
const {
state: { client },
@@ -108,7 +105,12 @@ export default function ModelModal(props: {
};
}
if (oldModel) client!.ai.updateModel(oldModel.id, model);
- else client!.ai.addModel(model);
+ else {
+ const newModelId = await client!.ai.addModel(model);
+ // if no default LLM set, mark new model as default
+ const defaultLLM = await client!.ai.getDefaultModel("LLM");
+ if (!defaultLLM) client!.ai.setDefaultModel("LLM", newModelId);
+ }
close();
}
}