Skip to content
This repository has been archived by the owner on Sep 15, 2024. It is now read-only.

Commit

Permalink
Proxy route for Google AI API & Pass Context LLM Api Logic (#212)
Browse files Browse the repository at this point in the history
* Feat [UI/UX] [Next JS Router] Proxy route for Google AI API

- [+] chore(next.config.mjs): add proxy route for Google AI API
- [+] feat(next.config.mjs): add proxy route for Google AI API

* Refactor Google AI (Better LLM Logic) Pass Context

- [+] chore(google.ts): add interfaces for GoogleResponse, MessagePart, Message, and ModelConfig
- [+] feat(google.ts): update extractMessage method to handle gemini-pro response
- [+] feat(google.ts): update chat method to handle role in neighboring messages and model configuration
- [+] feat(google.ts): update path method to use template literals for endpoint

* Chore [Constants] [Google AI] Update Comment for LLM

- [+] chore(google.ts): fix typo in copyright notice
- [+] chore(constant.ts): add comment explaining the purpose of DEFAULT_SYSTEM_TEMPLATE constant

* Chore [UI/UX Front End] Comment out unused variable

- [+] chore(chat.ts): comment out unused variable 'modelStartsWithGemini'
- [+] chore(chat.ts): remove unnecessary condition for logging system prompts

* Refactor [Model Config] Now Support Inject System Prompt

- [+] fix(google.ts): add todo comment to fix tauri desktop app issue
- [+] fix(model-config.tsx): refactor conditional rendering of model config options

* Refactor [Model Config] Inject System Prompt

- [+] feat(model-config.tsx): add ModelProvider import from constant file
- [+] refactor(model-config.tsx): remove unused variable 'isGoogleAIModel'
- [+] refactor(model-config.tsx): replace condition 'isGoogleAIModel' with 'allModels'

* Feat JS Docs [LLM Google Api] module documentation comments

- [+] chore(google.ts): add module documentation comments
- [+] chore(google.ts): add copyright notice
- [+] chore(google.ts): add interface for GoogleResponse
- [+] chore(google.ts): add interface for MessagePart
- [+] chore(google.ts): add interface for Message
- [+] chore(google.ts): add interface for ModelConfig
- [+] chore(google.ts): add class documentation comments
- [+] chore(google.ts): add method documentation comments for extractMessage
- [+] chore(google.ts): add method documentation comments for chat
- [+] chore(google.ts): add method documentation comments for usage
- [+] chore(google.ts): add method documentation comments for models

* Fix [UI/UX] Trim Topic

- [+] fix(utils.ts): update trimTopic function to handle additional punctuation characters
- [+] chore(utils.ts): remove unused variable 'isApp'

* Chore [Constants] Update Knowledge Cut Off Date

- [+] chore(constant.ts): update KnowledgeCutOffDate for "gemini-pro" model

* Fix [UI/UX] [Next JS Router] Proxy route for Google AI API

- [+] fix(next.config.mjs): update source path for /api/proxy route to include "google"

* Fix [LLM Api] [Google AI] Client Router Path

- [+] fix(google.ts): import missing constants DEFAULT_API_HOST, DEFAULT_CORS_HOST, GEMINI_BASE_URL
- [+] chore(google.ts): add JSDoc comments to path() method
- [+] feat(google.ts): update path() method to handle routing requests through a CORS proxy for Tauri desktop app
  • Loading branch information
H0llyW00dzZ authored Dec 27, 2023
1 parent 343841f commit b9ed661
Show file tree
Hide file tree
Showing 6 changed files with 152 additions and 92 deletions.
125 changes: 107 additions & 18 deletions app/client/platforms/google.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,10 @@
import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
/**
* Interfaces and classes for interacting with Google's AI models through the Gemini Pro API.
* @module google
* // Copyright (c) 2023 H0llyW00dzZ
*/

import { DEFAULT_API_HOST, DEFAULT_CORS_HOST, GEMINI_BASE_URL, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
import {
Expand All @@ -9,43 +15,98 @@ import { prettyObject } from "@/app/utils/format";
import { getClientConfig } from "@/app/config/client";
import Locale from "../../locales";
import { getServerSideConfig } from "@/app/config/server";

// Define interfaces for your payloads and responses to ensure type safety.
/**
 * Represents the response format received from Google's API.
 */
interface GoogleResponse {
  // Present on success; the generated text lives at
  // candidates[0].content.parts[0].text (see extractMessage).
  candidates?: Array<{
    content?: {
      parts?: Array<{
        text?: string;
      }>;
    };
  }>;
  // Present on failure; carries a human-readable error description.
  error?: {
    message?: string;
  };
}

/**
 * Represents a part of a message, typically containing text.
 */
interface MessagePart {
  // The textual content of this part.
  text: string;
}

/**
 * Represents a full message, including the role of the sender and the message parts.
 */
interface Message {
  // Sender role. The chat method maps "assistant" -> "model" and
  // "system" -> "user" before sending, so only "user"/"model" reach the API.
  role: string;
  // Ordered content fragments that make up the message body.
  parts: MessagePart[];
}

/**
 * Configuration for the AI model used within the chat method.
 */
interface ModelConfig {
  // Sampling temperature; controls randomness of generation.
  temperature?: number;
  // Upper bound on the number of tokens to generate.
  max_tokens?: number;
  // Nucleus-sampling cutoff (probability mass).
  top_p?: number;
  // top_k?: number; // Uncomment and add to the interface if used.
  // Model identifier, e.g. "gemini-pro" (the only model referenced in this file).
  model?: string;
}

/**
* The GeminiProApi class provides methods to interact with the Google AI via the Gemini Pro API.
* It implements the LLMApi interface.
*/
export class GeminiProApi implements LLMApi {
extractMessage(res: any) {
/**
* Extracts the message text from the GoogleResponse object.
* @param {GoogleResponse} res - The response object from Google's API.
* @returns {string} The extracted message text or error message.
*/
extractMessage(res: GoogleResponse): string {
console.log("[Response] gemini-pro response: ", res);

return (
res?.candidates?.at(0)?.content?.parts.at(0)?.text ||
res?.error?.message ||
res.candidates?.[0]?.content?.parts?.[0]?.text ||
res.error?.message ||
""
);
}
/**
* Sends a chat message to the Google API and handles the response.
* @param {ChatOptions} options - The chat options including messages and configuration.
* @returns {Promise<void>} A promise that resolves when the chat request is complete.
*/
async chat(options: ChatOptions): Promise<void> {
const messages = options.messages.map((v) => ({
const messages: Message[] = options.messages.map((v) => ({
role: v.role.replace("assistant", "model").replace("system", "user"),
parts: [{ text: v.content }],
}));

// google requires that role in neighboring messages must not be the same
for (let i = 0; i < messages.length - 1; ) {
// Check if current and next item both have the role "model"
if (messages[i].role === messages[i + 1].role) {
// Concatenate the 'parts' of the current and next item
messages[i].parts = messages[i].parts.concat(messages[i + 1].parts);
// Remove the next item
messages.splice(i + 1, 1);
} else {
// Move to the next item
i++;
}
}

const modelConfig = {
...useAppConfig.getState().modelConfig,
...useChatStore.getState().currentSession().mask.modelConfig,
...{
model: options.config.model,
},
const appConfig = useAppConfig.getState().modelConfig;
const chatConfig = useChatStore.getState().currentSession().mask.modelConfig;
const modelConfig: ModelConfig = {
...appConfig,
...chatConfig,
model: options.config.model,
};

const requestPayload = {
contents: messages,
generationConfig: {
Expand All @@ -65,6 +126,7 @@ export class GeminiProApi implements LLMApi {
const shouldStream = false;
const controller = new AbortController();
options.onController?.(controller);

try {
const chatPath = this.path(Google.ChatPath);
const chatPayload = {
Expand Down Expand Up @@ -207,16 +269,43 @@ export class GeminiProApi implements LLMApi {
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
options.onError?.(e as Error);
options.onError?.(e instanceof Error ? e : new Error(String(e)));
}
}
/**
 * Fetches the usage statistics of the LLM.
 * Not implemented for the Gemini Pro client — calling this always throws.
 * @returns {Promise<LLMUsage>} A promise that resolves to the usage statistics.
 * @throws {Error} Always; usage reporting is not supported by this provider.
 */
usage(): Promise<LLMUsage> {
  throw new Error("Method not implemented.");
}
/**
* Fetches the available LLM models.
* @returns {Promise<LLMModel[]>} A promise that resolves to an array of LLM models.
*/
async models(): Promise<LLMModel[]> {
return [];
}
path(path: string): string {
return "/api/google/" + path;
/**
 * Builds the URL path used for API requests.
 *
 * Temporary workaround: the Tauri desktop app cannot reach the Google AI
 * services directly, so requests from the desktop client are routed through
 * a CORS proxy (DEFAULT_CORS_HOST) instead of the relative Next.js route.
 *
 * @param {string} endpoint - The API endpoint that needs to be accessed.
 * @returns {string} The fully constructed URL path for the API request.
 */
path(endpoint: string): string {
  // Desktop (Tauri) clients must go through the CORS proxy host.
  const runningAsApp = !!getClientConfig()?.isApp;
  const basePath = runningAsApp
    ? `${DEFAULT_CORS_HOST}/api/google`
    : "/api/google";

  // Drop a single leading slash so joining never yields a double slash.
  const trimmed = endpoint.startsWith("/") ? endpoint.slice(1) : endpoint;

  return `${basePath}/${trimmed}`;
}

}
100 changes: 30 additions & 70 deletions app/components/model-config.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,10 @@ import Locale from "../locales";
import { InputRange } from "./input-range";
import { ListItem, Select } from "./ui-lib";
import { useAllModels } from "../utils/hooks";
import { DEFAULT_SYSTEM_TEMPLATE } from "../constant";
import {
DEFAULT_SYSTEM_TEMPLATE,
ModelProvider,
} from "../constant";

export function ModelConfigList(props: {
modelConfig: ModelConfig;
Expand Down Expand Up @@ -196,50 +199,8 @@ export function ModelConfigList(props: {
></InputRange>
</ListItem>

{props.modelConfig.model === "gemini-pro" ? null : (
{allModels && (
<>
<ListItem
title={Locale.Settings.PresencePenalty.Title}
subTitle={Locale.Settings.PresencePenalty.SubTitle}
>
<InputRange
value={props.modelConfig.presence_penalty?.toFixed(1)}
min="-2"
max="2"
step="0.1"
onChange={(e) => {
props.updateConfig(
(config) =>
(config.presence_penalty =
ModalConfigValidator.presence_penalty(
e.currentTarget.valueAsNumber,
)),
);
}}
></InputRange>
</ListItem>

<ListItem
title={Locale.Settings.FrequencyPenalty.Title}
subTitle={Locale.Settings.FrequencyPenalty.SubTitle}
>
<InputRange
value={props.modelConfig.frequency_penalty?.toFixed(1)}
min="-2"
max="2"
step="0.1"
onChange={(e) => {
props.updateConfig(
(config) =>
(config.frequency_penalty =
ModalConfigValidator.frequency_penalty(
e.currentTarget.valueAsNumber,
)),
);
}}
></InputRange>
</ListItem>

<ListItem
title={Locale.Settings.InjectSystemPrompts.Title}
subTitle={Locale.Settings.InjectSystemPrompts.SubTitle}
Expand All @@ -248,37 +209,36 @@ export function ModelConfigList(props: {
type="checkbox"
checked={props.modelConfig.enableInjectSystemPrompts}
onChange={(e) =>
props.updateConfig(
(config) =>
(config.enableInjectSystemPrompts =
e.currentTarget.checked),
)
props.updateConfig((config) => {
// Use e.target to refer to the element that triggered the event
config.enableInjectSystemPrompts = e.target.checked;
})
}
></input>
/>
</ListItem>

{props.modelConfig.enableInjectSystemPrompts && (
<>
<ListItem
title={Locale.Settings.SystemPromptTemplate.Title}
subTitle={Locale.Settings.SystemPromptTemplate.SubTitle}
<ListItem
title={Locale.Settings.SystemPromptTemplate.Title}
subTitle={Locale.Settings.SystemPromptTemplate.SubTitle}
>
<Select
value={props.modelConfig.systemprompt.default}
onChange={(e) =>
props.updateConfig((config) => {
// Use e.target to refer to the element that triggered the event
config.systemprompt.default = e.target.value;
})
}
>
<Select
value={props.modelConfig.systemprompt.default}
onChange={(e) =>
props.updateConfig(
(config) => (config.systemprompt.default = e.currentTarget.value),
)
}
>
{customsystemprompts.map((prompt) => (
<option value={prompt.value} key={prompt.value}>
{prompt.label}
</option>
))}
</Select>
</ListItem>
</>
{customsystemprompts.map((prompt) => (
// Use a unique value for the key, not the array index
<option value={prompt.value} key={prompt.value}>
{prompt.label}
</option>
))}
</Select>
</ListItem>
)}
<ListItem
title={Locale.Settings.InputTemplate.Title}
Expand Down
5 changes: 5 additions & 0 deletions app/constant.ts
Original file line number Diff line number Diff line change
Expand Up @@ -111,6 +111,10 @@ export const Google = {
};

export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang
// Since the latest Google AI refactor (by H0llyW00dzZ), this template generates
// the default system message used to pass context to the model. It can also be
// overridden by customizing the default system message in the settings page —
// for example, by replacing "ChatGPT"/"OpenAI" with "Gemini-Pro"/"Google".
export const DEFAULT_SYSTEM_TEMPLATE = `
You are ChatGPT, a large language model trained by OpenAI.
Knowledge cutoff: {{cutoff}}
Expand All @@ -126,6 +130,7 @@ export const KnowledgeCutOffDate: Record<string, string> = {
default: "2021-09",
"gpt-4-1106-preview": "2023-04",
"gpt-4-vision-preview": "2023-04",
"gemini-pro": "2023-12", // TODO: replace with the model's actual knowledge-cutoff date once confirmed
};

export const DEFAULT_MODELS = [
Expand Down
6 changes: 3 additions & 3 deletions app/store/chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -392,7 +392,7 @@ export const useChatStore = createPersistStore(

// system prompts, to get close to OpenAI Web ChatGPT
const modelStartsWithDallE = modelConfig.model.startsWith("dall-e");
const modelStartsWithGemini = modelConfig.model.startsWith("gemini-pro");
//const modelStartsWithGemini = modelConfig.model.startsWith("gemini-pro");
const shouldInjectSystemPrompts = modelConfig.enableInjectSystemPrompts;
let systemPrompts: ChatMessage[] = []; // Define the type for better type checking
if (shouldInjectSystemPrompts) {
Expand All @@ -406,8 +406,8 @@ export const useChatStore = createPersistStore(
}

// Log messages about system prompts based on conditions
if (modelStartsWithDallE || modelStartsWithGemini) {
console.log("[Global System Prompt] Dall-e or Gemini Models no need this");
if (modelStartsWithDallE) {
console.log("[Global System Prompt] Dall-e no need this");
} else if (shouldInjectSystemPrompts) {
console.log(
"[Global System Prompt] ",
Expand Down
2 changes: 1 addition & 1 deletion app/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ export function trimTopic(topic: string) {
// Fix an issue where double quotes still show in the Indonesian language
// This will remove the specified punctuation from the end of the string
// and also trim quotes from both the start and end if they exist.
return topic.replace(/^["“”]+|["“”]+$/g, "").replace(/[,。!?”“"、,.!?]*$/, "");
return topic.replace(/^["“”*]+|["“”*]+$/g, "").replace(/[,。!?”“"、,.!?*]*$/, ""); // fix for google ai
}

const isApp = !!getClientConfig()?.isApp;
Expand Down
6 changes: 6 additions & 0 deletions next.config.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,12 @@ if (mode !== "export") {
source: "/sharegpt",
destination: "https://sharegpt.com/api/conversations",
},
// Google AI proxy route for gemini-pro;
// this keeps routing working from the Tauri desktop app as well.
{
source: "/api/proxy/google/:path*",
destination: "https://generativelanguage.googleapis.com/:path*",
},
];

return {
Expand Down

0 comments on commit b9ed661

Please sign in to comment.