-
Notifications
You must be signed in to change notification settings - Fork 15
/
llm.ts
47 lines (39 loc) · 1.16 KB
/
llm.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import { ChatOpenAI } from "langchain/chat_models/openai";
import { HumanMessage } from "langchain/schema";
// LLM API: result of a completion request — either the generated text
// plus request metadata (id, model name), or an error description.
// Note: the current implementation of llmRequest throws on failure
// rather than returning the { error } variant.
export type Completion = { text: string; id: string; model: string } | { error: string };
/**
 * Sends a single-turn prompt to an OpenAI chat model via Langchain,
 * optionally routed through a Helicone proxy for auth and caching.
 *
 * @param prompt - The user message to send as a single human turn.
 * @param config - Extra ChatOpenAI constructor options (model name,
 *   temperature, …) spread over the defaults. Kept as `any` to stay
 *   backward-compatible with existing callers.
 * @returns The generated text, an empty request id (unsupported by
 *   Langchain), and the model name reported by the provider.
 * @throws Error wrapping the underlying failure message.
 */
async function llmRequest(prompt: string, config: any): Promise<Completion> {
  const baseOptions = {
    headers: {
      "Helicone-Auth": `Bearer ${process.env.HELICONE_API_KEY}`,
      // NOTE(review): env vars are strings, so ANY non-empty value —
      // including "false" — enables caching here. Confirm that is intended.
      ...(process.env.OPENAI_CACHE_ENABLED && {
        "Helicone-Cache-Enabled": "true",
        "Helicone-Cache-Bucket-Max-Size": "1",
      }),
    },
  };
  const chat = new ChatOpenAI(
    {
      openAIApiKey: process.env.OPENAI_API_KEY,
      ...config, // caller-supplied options override/extend the defaults
    },
    {
      basePath: process.env.OPENAI_BASE_URL, // e.g. a Helicone proxy URL
      baseOptions: baseOptions,
    }
  );
  try {
    // One conversation consisting of a single human message.
    const result = await chat.generate([
      [new HumanMessage(prompt)]
    ]);
    return {
      text: result.generations[0][0].text,
      id: "", // This is currently unsupported by Langchain.
      model: result.llmOutput?.modelName || ""
    };
  } catch (error: unknown) {
    // Narrow before reading .message: non-Error throws (strings, objects)
    // would otherwise surface as "undefined" in the rethrown message.
    const message = error instanceof Error ? error.message : String(error);
    throw new Error(`Failed to make request. Error message: ${message}`);
  }
}
export { llmRequest };