diff --git a/README.md b/README.md
index 89dfc493b..1493376ce 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,7 @@ You can import in Deno via:
 ```ts
-import OpenAI from 'https://raw.githubusercontent.com/openai/openai-node/v4.14.1-deno/mod.ts';
+import OpenAI from 'https://raw.githubusercontent.com/openai/openai-node/v4.14.2-deno/mod.ts';
 ```
@@ -102,6 +102,119 @@ Documentation for each method, request param, and response field are available i
 > [!IMPORTANT]
 > Previous versions of this SDK used a `Configuration` class. See the [v3 to v4 migration guide](https://github.com/openai/openai-node/discussions/217).
+### Streaming responses
+
+This library provides several conveniences for streaming chat completions, for example:
+
+```ts
+import OpenAI from 'openai';
+
+const openai = new OpenAI();
+
+async function main() {
+  const stream = await openai.beta.chat.completions.stream({
+    model: 'gpt-4',
+    messages: [{ role: 'user', content: 'Say this is a test' }],
+    stream: true,
+  });
+
+  stream.on('content', (delta, snapshot) => {
+    process.stdout.write(delta);
+  });
+
+  // or, equivalently:
+  for await (const part of stream) {
+    process.stdout.write(part.choices[0]?.delta?.content || '');
+  }
+
+  const chatCompletion = await stream.finalChatCompletion();
+  console.log(chatCompletion); // {id: "…", choices: […], …}
+}
+
+main();
+```
+
+Streaming with `openai.beta.chat.completions.stream({…})` exposes
+[various helpers for your convenience](helpers.md#events) including event handlers and promises.
+
+Alternatively, you can use `openai.chat.completions.create({ stream: true, … })`
+which only returns an async iterable of the chunks in the stream and thus uses less memory
+(it does not build up a final chat completion object for you).
+
+If you need to cancel a stream, you can `break` from a `for await` loop or call `stream.abort()`.
+
+### Automated function calls
+
+We provide an `openai.beta.chat.completions.runFunctions({…})` convenience helper for using function calls
+with the `/chat/completions` endpoint which automatically calls the JavaScript functions you provide
+and sends their results back to the `/chat/completions` endpoint,
+looping as long as the model requests function calls.
+
+If you pass a `parse` function, it will automatically parse the `arguments` for you and return any parsing errors to the model to attempt auto-recovery. Otherwise, the args will be passed to the function you provide as a string.
+
+If you pass `function_call: {name: …}` instead of `auto`, it returns immediately after calling that function (and only loops to auto-recover parsing errors).
+
+```ts
+import OpenAI from 'openai';
+
+const client = new OpenAI();
+
+async function main() {
+  const runner = client.beta.chat.completions
+    .runFunctions({
+      model: 'gpt-3.5-turbo',
+      messages: [{ role: 'user', content: 'How is the weather this week?' }],
+      functions: [
+        {
+          function: getCurrentLocation,
+          parameters: { type: 'object', properties: {} },
+        },
+        {
+          function: getWeather,
+          parse: JSON.parse, // or use a validation library like zod for typesafe parsing.
+          parameters: {
+            type: 'object',
+            properties: {
+              location: { type: 'string' },
+            },
+          },
+        },
+      ],
+    })
+    .on('message', (message) => console.log(message));
+
+  const finalContent = await runner.finalContent();
+  console.log();
+  console.log('Final content:', finalContent);
+}
+
+async function getCurrentLocation() {
+  return 'Boston'; // Simulate lookup
+}
+
+async function getWeather(args: { location: string }) {
+  const { location } = args;
+  // … do lookup …
+  return { temperature, precipitation };
+}
+
+main();
+
+// {role: "user", content: "How's the weather this week?"}
+// {role: "assistant", function_call: "getCurrentLocation", arguments: "{}"}
+// {role: "function", name: "getCurrentLocation", content: "Boston"}
+// {role: "assistant", function_call: "getWeather", arguments: '{"location": "Boston"}'}
+// {role: "function", name: "getWeather", content: '{"temperature": "50degF", "precipitation": "high"}'}
+// {role: "assistant", content: "It's looking cold and rainy - you might want to wear a jacket!"}
+//
+// Final content: "It's looking cold and rainy - you might want to wear a jacket!"
+```
+
+Like with `.stream()`, we provide a variety of [helpers and events](helpers.md#events).
+
+Read more about various examples such as integrating with [zod](helpers.md#integrate-with-zod),
+[next.js](helpers.md#integrate-with-nextjs), and [proxying a stream to the browser](helpers.md#proxy-streaming-to-a-browser).
+
 ## File Uploads
 
 Request parameters that correspond to file uploads can be passed in many different forms:
diff --git a/api.md b/api.md
index 7285097f2..00ba41ec2 100644
--- a/api.md
+++ b/api.md
@@ -156,3 +156,14 @@ Methods:
 - client.fineTunes.list() -> FineTunesPage
 - client.fineTunes.cancel(fineTuneId) -> FineTune
 - client.fineTunes.listEvents(fineTuneId, { ...params }) -> FineTuneEventsListResponse
+
+# Beta
+
+## Chat
+
+### Completions
+
+Methods:
+
+- client.beta.chat.completions.runFunctions(body, options?) -> ChatCompletionRunner | ChatCompletionStreamingRunner
+- client.beta.chat.completions.stream(body, options?)
-> ChatCompletionStream diff --git a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts index ea6d0a76b..b7ab308cb 100644 --- a/ecosystem-tests/node-ts-cjs-auto/tests/test.ts +++ b/ecosystem-tests/node-ts-cjs-auto/tests/test.ts @@ -1,4 +1,4 @@ -import OpenAI, { toFile } from 'openai'; +import OpenAI, { APIUserAbortError, toFile } from 'openai'; import { TranscriptionCreateParams } from 'openai/resources/audio/transcriptions'; import fetch from 'node-fetch'; import { File as FormDataFile, Blob as FormDataBlob } from 'formdata-node'; @@ -68,6 +68,92 @@ it(`streaming works`, async function () { expect(chunks.map((c) => c.choices[0]?.delta.content || '').join('')).toBeSimilarTo('This is a test', 10); }); +it(`ChatCompletionStream works`, async function () { + const chunks: OpenAI.Chat.ChatCompletionChunk[] = []; + const contents: [string, string][] = []; + const messages: OpenAI.Chat.ChatCompletionMessage[] = []; + const chatCompletions: OpenAI.Chat.ChatCompletion[] = []; + let finalContent: string | undefined; + let finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined; + let finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined; + + const stream = client.beta.chat.completions + .stream({ + model: 'gpt-4', + messages: [{ role: 'user', content: 'Say this is a test' }], + }) + .on('chunk', (chunk) => chunks.push(chunk)) + .on('content', (delta, snapshot) => contents.push([delta, snapshot])) + .on('message', (message) => messages.push(message)) + .on('chatCompletion', (completion) => chatCompletions.push(completion)) + .on('finalContent', (content) => (finalContent = content)) + .on('finalMessage', (message) => (finalMessage = message)) + .on('finalChatCompletion', (completion) => (finalChatCompletion = completion)); + const content = await stream.finalContent(); + + expect(content).toBeSimilarTo('This is a test', 10); + expect(chunks.length).toBeGreaterThan(0); + expect(contents.length).toBeGreaterThan(0); + for (const chunk of chunks) { + expect(chunk.id).toEqual(finalChatCompletion?.id); + expect(chunk.created).toEqual(finalChatCompletion?.created); + expect(chunk.model).toEqual(finalChatCompletion?.model); + } + expect(finalContent).toEqual(content); + expect(contents.at(-1)?.[1]).toEqual(content); + expect(finalMessage?.content).toEqual(content); + expect(finalChatCompletion?.choices?.[0]?.message.content).toEqual(content); + expect(messages).toEqual([finalMessage]); + expect(chatCompletions).toEqual([finalChatCompletion]); + expect(await stream.finalContent()).toEqual(content); + expect(await stream.finalMessage()).toEqual(finalMessage); + expect(await stream.finalChatCompletion()).toEqual(finalChatCompletion); +}); + +it(`aborting ChatCompletionStream works`, async function () { + const chunks: OpenAI.Chat.ChatCompletionChunk[] = []; + const contents: [string, string][] = []; + const messages: OpenAI.Chat.ChatCompletionMessage[] = []; + const chatCompletions: OpenAI.Chat.ChatCompletion[] = []; + let finalContent: string | undefined; + let finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined; + let finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined; + let emittedError: any; + let caughtError: any; + const controller = new AbortController(); + const stream = client.beta.chat.completions + .stream( + { + model: 'gpt-4', + messages: [{ role: 'user', content: 'Say this is a test' }], + }, + { signal: controller.signal }, + ) + .on('error', (e) => (emittedError = e)) + .on('chunk', (chunk) => chunks.push(chunk)) + 
.on('content', (delta, snapshot) => { + contents.push([delta, snapshot]); + controller.abort(); + }) + .on('message', (message) => messages.push(message)) + .on('chatCompletion', (completion) => chatCompletions.push(completion)) + .on('finalContent', (content) => (finalContent = content)) + .on('finalMessage', (message) => (finalMessage = message)) + .on('finalChatCompletion', (completion) => (finalChatCompletion = completion)); + try { + await stream.finalContent(); + } catch (error) { + caughtError = error; + } + expect(caughtError).toBeInstanceOf(APIUserAbortError); + expect(finalContent).toBeUndefined(); + expect(finalMessage).toBeUndefined(); + expect(finalChatCompletion).toBeUndefined(); + expect(chatCompletions).toEqual([]); + expect(chunks.length).toBeGreaterThan(0); + expect(contents.length).toBeGreaterThan(0); +}); + it('handles formdata-node File', async function () { const file = await fetch(url) .then((x) => x.arrayBuffer()) diff --git a/examples/.gitignore b/examples/.gitignore new file mode 100644 index 000000000..a8669b19e --- /dev/null +++ b/examples/.gitignore @@ -0,0 +1,2 @@ +yarn.lock +node_modules diff --git a/examples/function-call-diy.ts b/examples/function-call-diy.ts new file mode 100755 index 000000000..ce12431b0 --- /dev/null +++ b/examples/function-call-diy.ts @@ -0,0 +1,142 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; +import { ChatCompletionMessage, ChatCompletionMessageParam } from 'openai/resources/chat'; + +// gets API Key from environment variable OPENAI_API_KEY +const openai = new OpenAI(); + +const functions: OpenAI.Chat.ChatCompletionCreateParams.Function[] = [ + { + name: 'list', + description: 'list queries books by genre, and returns a list of names of books', + parameters: { + type: 'object', + properties: { + genre: { type: 'string', enum: ['mystery', 'nonfiction', 'memoir', 'romance', 'historical'] }, + }, + }, + }, + { + name: 'search', + description: 'search queries books by their name and returns a list of book names and their ids', + parameters: { + type: 'object', + properties: { + name: { type: 'string' }, + }, + }, + }, + { + name: 'get', + description: + "get returns a book's detailed information based on the id of the book. 
Note that this does not accept names, and only IDs, which you can get by using search.",
+    parameters: {
+      type: 'object',
+      properties: {
+        id: { type: 'string' },
+      },
+    },
+  },
+];
+
+async function callFunction(function_call: ChatCompletionMessage.FunctionCall): Promise<any> {
+  const args = JSON.parse(function_call.arguments!);
+  switch (function_call.name) {
+    case 'list':
+      return await list(args['genre']);
+
+    case 'search':
+      return await search(args['name']);
+
+    case 'get':
+      return await get(args['id']);
+
+    default:
+      throw new Error('No function found');
+  }
+}
+
+async function main() {
+  const messages: ChatCompletionMessageParam[] = [
+    {
+      role: 'system',
+      content:
+        'Please use our book database, which you can access using functions to answer the following questions.',
+    },
+    {
+      role: 'user',
+      content:
+        'I really enjoyed reading To Kill a Mockingbird, could you recommend me a book that is similar and tell me why?',
+    },
+  ];
+  console.log(messages[0]);
+  console.log(messages[1]);
+  console.log();
+
+  while (true) {
+    const completion = await openai.chat.completions.create({
+      model: 'gpt-3.5-turbo',
+      messages,
+      functions: functions,
+    });
+
+    const message = completion.choices[0]!.message;
+    messages.push(message);
+    console.log(message);
+
+    // If there is no function call, we're done and can exit this loop
+    if (!message.function_call) {
+      return;
+    }
+
+    // If there is a function call, we generate a new message with the role 'function'.
+    const result = await callFunction(message.function_call);
+    const newMessage = {
+      role: 'function' as const,
+      name: message.function_call.name!,
+      content: JSON.stringify(result),
+    };
+    messages.push(newMessage);
+
+    console.log(newMessage);
+    console.log();
+  }
+}
+
+const db = [
+  {
+    id: 'a1',
+    name: 'To Kill a Mockingbird',
+    genre: 'historical',
+    description: `Compassionate, dramatic, and deeply moving, "To Kill A Mockingbird" takes readers to the roots of human behavior - to innocence and experience, kindness and cruelty, love and hatred, humor and pathos. Now with over 18 million copies in print and translated into forty languages, this regional story by a young Alabama woman claims universal appeal. Harper Lee always considered her book to be a simple love story. Today it is regarded as a masterpiece of American literature.`,
+  },
+  {
+    id: 'a2',
+    name: 'All the Light We Cannot See',
+    genre: 'historical',
+    description: `In a mining town in Germany, Werner Pfennig, an orphan, grows up with his younger sister, enchanted by a crude radio they find that brings them news and stories from places they have never seen or imagined. Werner becomes an expert at building and fixing these crucial new instruments and is enlisted to use his talent to track down the resistance. Deftly interweaving the lives of Marie-Laure and Werner, Doerr illuminates the ways, against all odds, people try to be good to one another.`,
+  },
+  {
+    id: 'a3',
+    name: 'Where the Crawdads Sing',
+    genre: 'historical',
+    description: `For years, rumors of the “Marsh Girl” haunted Barkley Cove, a quiet fishing village. Kya Clark is barefoot and wild; unfit for polite society. So in late 1969, when the popular Chase Andrews is found dead, locals immediately suspect her.
+
+But Kya is not what they say. A born naturalist with just one day of school, she takes life's lessons from the land, learning the real ways of the world from the dishonest signals of fireflies. 
But while she has the skills to live in solitude forever, the time comes when she yearns to be touched and loved. Drawn to two young men from town, who are each intrigued by her wild beauty, Kya opens herself to a new and startling world—until the unthinkable happens.`, + }, +]; + +async function list(genre: string) { + return db.filter((item) => item.genre === genre).map((item) => ({ name: item.name, id: item.id })); +} + +async function search(name: string) { + return db.filter((item) => item.name.includes(name)).map((item) => ({ name: item.name, id: item.id })); +} + +async function get(id: string) { + return db.find((item) => item.id === id)!; +} + +main(); diff --git a/examples/function-call-helpers-zod.ts b/examples/function-call-helpers-zod.ts new file mode 100755 index 000000000..f783aee08 --- /dev/null +++ b/examples/function-call-helpers-zod.ts @@ -0,0 +1,112 @@ +#!/usr/bin/env -S npm run tsn -T + +import OpenAI from 'openai'; +import { ZodSchema, z } from 'zod'; +import { zodToJsonSchema } from 'zod-to-json-schema'; + +const openai = new OpenAI(); + +const ListParams = z.object({ + genre: z.enum(['mystery', 'nonfiction', 'memoir', 'romance', 'historical']), +}); + +const SearchParams = z.object({ + name: z.string(), +}); + +const GetParams = z.object({ + id: z.string(), +}); + +const functions = [ + { + name: 'list', + description: 'list queries books by genre, and returns a list of names of books', + parameters: zodToJsonSchema(ListParams), + parse: zodParseJSON(ListParams), + function: list, + }, + { + name: 'search', + description: 'search queries books by their name and returns a list of book names and their ids', + parameters: zodToJsonSchema(SearchParams), + parse: zodParseJSON(SearchParams), + function: search, + }, + { + name: 'get', + description: + "get returns a book's detailed information based on the id of the book. Note that this does not accept names, and only IDs, which you can get by using search.", + parameters: zodToJsonSchema(GetParams), + parse: zodParseJSON(GetParams), + function: get, + }, +] as const; + +async function main() { + const runner = await openai.beta.chat.completions + .runFunctions({ + model: 'gpt-3.5-turbo', + messages: [ + { + role: 'system', + content: + 'Please use our book database, which you can access using functions to answer the following questions.', + }, + { + role: 'user', + content: + 'I really enjoyed reading To Kill a Mockingbird, could you recommend me a book that is similar and tell me why?', + }, + ], + functions, + }) + .on('message', (msg) => console.log(msg)) + .on('content', (diff) => process.stdout.write(diff)); + + const result = await runner.finalChatCompletion(); + console.log(result); + + console.log(); + console.log(runner.messages); +} + +const db = [ + { + id: 'a1', + name: 'To Kill a Mockingbird', + genre: 'historical', + description: `Compassionate, dramatic, and deeply moving, "To Kill A Mockingbird" takes readers to the roots of human behavior - to innocence and experience, kindness and cruelty, love and hatred, humor and pathos. Now with over 18 million copies in print and translated into forty languages, this regional story by a young Alabama woman claims universal appeal. Harper Lee always considered her book to be a simple love story. 
Today it is regarded as a masterpiece of American literature.`,
+  },
+  {
+    id: 'a2',
+    name: 'All the Light We Cannot See',
+    genre: 'historical',
+    description: `In a mining town in Germany, Werner Pfennig, an orphan, grows up with his younger sister, enchanted by a crude radio they find that brings them news and stories from places they have never seen or imagined. Werner becomes an expert at building and fixing these crucial new instruments and is enlisted to use his talent to track down the resistance. Deftly interweaving the lives of Marie-Laure and Werner, Doerr illuminates the ways, against all odds, people try to be good to one another.`,
+  },
+  {
+    id: 'a3',
+    name: 'Where the Crawdads Sing',
+    genre: 'historical',
+    description: `For years, rumors of the “Marsh Girl” haunted Barkley Cove, a quiet fishing village. Kya Clark is barefoot and wild; unfit for polite society. So in late 1969, when the popular Chase Andrews is found dead, locals immediately suspect her.
+But Kya is not what they say. A born naturalist with just one day of school, she takes life's lessons from the land, learning the real ways of the world from the dishonest signals of fireflies. But while she has the skills to live in solitude forever, the time comes when she yearns to be touched and loved. Drawn to two young men from town, who are each intrigued by her wild beauty, Kya opens herself to a new and startling world—until the unthinkable happens.`,
+  },
+];
+
+async function list({ genre }: z.infer<typeof ListParams>) {
+  return db.filter((item) => item.genre === genre).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function search({ name }: z.infer<typeof SearchParams>) {
+  return db.filter((item) => item.name.includes(name)).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function get({ id }: z.infer<typeof GetParams>) {
+  return db.find((item) => item.id === id)!;
+}
+
+function zodParseJSON<T>(schema: ZodSchema<T>) {
+  return (input: string): T => schema.parse(JSON.parse(input));
+}
+
+main();
diff --git a/examples/function-call-helpers.ts b/examples/function-call-helpers.ts
new file mode 100755
index 000000000..48e2afd62
--- /dev/null
+++ b/examples/function-call-helpers.ts
@@ -0,0 +1,110 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import OpenAI from 'openai';
+
+// gets API Key from environment variable OPENAI_API_KEY
+const openai = new OpenAI();
+
+const functions = [
+  {
+    name: 'list',
+    description: 'list queries books by genre, and returns a list of names of books',
+    parameters: {
+      type: 'object',
+      properties: {
+        genre: { type: 'string', enum: ['mystery', 'nonfiction', 'memoir', 'romance', 'historical'] },
+      },
+    },
+    function: list,
+    parse: JSON.parse,
+  },
+  {
+    name: 'search',
+    description: 'search queries books by their name and returns a list of book names and their ids',
+    parameters: {
+      type: 'object',
+      properties: {
+        name: { type: 'string' },
+      },
+    },
+    function: search,
+    parse: JSON.parse,
+  },
+  {
+    name: 'get',
+    description:
+      "get returns a book's detailed information based on the id of the book. 
Note that this does not accept names, and only IDs, which you can get by using search.", + parameters: { + type: 'object', + properties: { + id: { type: 'string' }, + }, + }, + function: get, + parse: JSON.parse, + }, +]; + +async function main() { + const runner = await openai.beta.chat.completions + .runFunctions({ + model: 'gpt-3.5-turbo', + messages: [ + { + role: 'system', + content: + 'Please use our book database, which you can access using functions to answer the following questions.', + }, + { + role: 'user', + content: + 'I really enjoyed reading To Kill a Mockingbird, could you recommend me a book that is similar and tell me why?', + }, + ], + functions, + }) + .on('message', (msg) => console.log(msg)) + .on('content', (diff) => process.stdout.write(diff)); + + const result = await runner.finalChatCompletion(); + console.log(result); + + console.log(); + console.log(runner.messages); +} + +const db = [ + { + id: 'a1', + name: 'To Kill a Mockingbird', + genre: 'historical', + description: `Compassionate, dramatic, and deeply moving, "To Kill A Mockingbird" takes readers to the roots of human behavior - to innocence and experience, kindness and cruelty, love and hatred, humor and pathos. Now with over 18 million copies in print and translated into forty languages, this regional story by a young Alabama woman claims universal appeal. Harper Lee always considered her book to be a simple love story. Today it is regarded as a masterpiece of American literature.`, + }, + { + id: 'a2', + name: 'All the Light We Cannot See', + genre: 'historical', + description: `In a mining town in Germany, Werner Pfennig, an orphan, grows up with his younger sister, enchanted by a crude radio they find that brings them news and stories from places they have never seen or imagined. Werner becomes an expert at building and fixing these crucial new instruments and is enlisted to use his talent to track down the resistance. Deftly interweaving the lives of Marie-Laure and Werner, Doerr illuminates the ways, against all odds, people try to be good to one another.`, + }, + { + id: 'a3', + name: 'Where the Crawdads Sing', + genre: 'historical', + description: `For years, rumors of the “Marsh Girl” haunted Barkley Cove, a quiet fishing village. Kya Clark is barefoot and wild; unfit for polite society. So in late 1969, when the popular Chase Andrews is found dead, locals immediately suspect her. +But Kya is not what they say. A born naturalist with just one day of school, she takes life's lessons from the land, learning the real ways of the world from the dishonest signals of fireflies. But while she has the skills to live in solitude forever, the time comes when she yearns to be touched and loved. 
Drawn to two young men from town, who are each intrigued by her wild beauty, Kya opens herself to a new and startling world—until the unthinkable happens.`,
+  },
+];
+
+async function list({ genre }: { genre: string }) {
+  return db.filter((item) => item.genre === genre).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function search({ name }: { name: string }) {
+  return db.filter((item) => item.name.includes(name)).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function get({ id }: { id: string }) {
+  return db.find((item) => item.id === id)!;
+}
+
+main();
diff --git a/examples/function-call-stream-raw.ts b/examples/function-call-stream-raw.ts
new file mode 100755
index 000000000..be4688aa7
--- /dev/null
+++ b/examples/function-call-stream-raw.ts
@@ -0,0 +1,185 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import util from 'util';
+import OpenAI from 'openai';
+import {
+  ChatCompletionMessage,
+  ChatCompletionChunk,
+  ChatCompletionMessageParam,
+} from 'openai/resources/chat';
+
+// gets API Key from environment variable OPENAI_API_KEY
+const openai = new OpenAI();
+
+const functions: OpenAI.Chat.ChatCompletionCreateParams.Function[] = [
+  {
+    name: 'list',
+    description: 'list queries books by genre, and returns a list of names of books',
+    parameters: {
+      type: 'object',
+      properties: {
+        genre: { type: 'string', enum: ['mystery', 'nonfiction', 'memoir', 'romance', 'historical'] },
+      },
+    },
+  },
+  {
+    name: 'search',
+    description: 'search queries books by their name and returns a list of book names and their ids',
+    parameters: {
+      type: 'object',
+      properties: {
+        name: { type: 'string' },
+      },
+    },
+  },
+  {
+    name: 'get',
+    description:
+      "get returns a book's detailed information based on the id of the book. Note that this does not accept names, and only IDs, which you can get by using search.",
+    parameters: {
+      type: 'object',
+      properties: {
+        id: { type: 'string' },
+      },
+    },
+  },
+];
+
+async function callFunction(function_call: ChatCompletionMessage.FunctionCall): Promise<any> {
+  const args = JSON.parse(function_call.arguments!);
+  switch (function_call.name) {
+    case 'list':
+      return await list(args['genre']);
+
+    case 'search':
+      return await search(args['name']);
+
+    case 'get':
+      return await get(args['id']);
+
+    default:
+      throw new Error('No function found');
+  }
+}
+
+async function main() {
+  const messages: ChatCompletionMessageParam[] = [
+    {
+      role: 'system',
+      content:
+        'Please use our book database, which you can access using functions to answer the following questions.',
+    },
+    {
+      role: 'user',
+      content:
+        'I really enjoyed reading To Kill a Mockingbird, could you recommend me a book that is similar and tell me why?',
+    },
+  ];
+  console.log(messages[0]);
+  console.log(messages[1]);
+  console.log();
+
+  while (true) {
+    const stream = await openai.chat.completions.create({
+      model: 'gpt-3.5-turbo',
+      messages,
+      functions: functions,
+      stream: true,
+    });
+
+    // Since the stream returns chunks, we need to build up the ChatCompletionMessage object.
+    // We implement this logic in messageReducer, which coalesces deltas into the message.
+    // `lineRewriter()` allows us to rewrite the last output with new text, which is one
+    // way of forwarding the streamed output to a visual interface.
+ let writeLine = lineRewriter(); + let message = {} as ChatCompletionMessage; + for await (const chunk of stream) { + message = messageReducer(message, chunk); + writeLine(message); + } + console.log(); + messages.push(message); + + // If there is no function call, we're done and can exit this loop + if (!message.function_call) { + return; + } + + // If there is a function call, we generate a new message with the role 'function'. + const result = await callFunction(message.function_call); + const newMessage = { + role: 'function' as const, + name: message.function_call.name!, + content: JSON.stringify(result), + }; + messages.push(newMessage); + + console.log(newMessage); + console.log(); + } +} + +function messageReducer(previous: ChatCompletionMessage, item: ChatCompletionChunk): ChatCompletionMessage { + const reduce = (acc: any, delta: any) => { + acc = { ...acc }; + for (const [key, value] of Object.entries(delta)) { + if (acc[key] === undefined || acc[key] === null) { + acc[key] = value; + } else if (typeof acc[key] === 'string' && typeof value === 'string') { + (acc[key] as string) += value; + } else if (typeof acc[key] === 'object' && !Array.isArray(acc[key])) { + acc[key] = reduce(acc[key], value); + } + } + return acc; + }; + + return reduce(previous, item.choices[0]!.delta) as ChatCompletionMessage; +} + +function lineRewriter() { + let lastMessageLength = 0; + return function write(value: any) { + process.stdout.cursorTo(0); + process.stdout.moveCursor(0, -Math.floor((lastMessageLength - 1) / process.stdout.columns)); + lastMessageLength = util.formatWithOptions({ colors: false, breakLength: Infinity }, value).length; + process.stdout.write(util.formatWithOptions({ colors: true, breakLength: Infinity }, value)); + }; +} + +const db = [ + { + id: 'a1', + name: 'To Kill a Mockingbird', + genre: 'historical', + description: `Compassionate, dramatic, and deeply moving, "To Kill A Mockingbird" takes readers to the roots of human behavior - to innocence and experience, kindness and cruelty, love and hatred, humor and pathos. Now with over 18 million copies in print and translated into forty languages, this regional story by a young Alabama woman claims universal appeal. Harper Lee always considered her book to be a simple love story. Today it is regarded as a masterpiece of American literature.`, + }, + { + id: 'a2', + name: 'All the Light We Cannot See', + genre: 'historical', + description: `In a mining town in Germany, Werner Pfennig, an orphan, grows up with his younger sister, enchanted by a crude radio they find that brings them news and stories from places they have never seen or imagined. Werner becomes an expert at building and fixing these crucial new instruments and is enlisted to use his talent to track down the resistance. Deftly interweaving the lives of Marie-Laure and Werner, Doerr illuminates the ways, against all odds, people try to be good to one another.`, + }, + { + id: 'a3', + name: 'Where the Crawdads Sing', + genre: 'historical', + description: `For years, rumors of the “Marsh Girl” haunted Barkley Cove, a quiet fishing village. Kya Clark is barefoot and wild; unfit for polite society. So in late 1969, when the popular Chase Andrews is found dead, locals immediately suspect her. + +But Kya is not what they say. A born naturalist with just one day of school, she takes life's lessons from the land, learning the real ways of the world from the dishonest signals of fireflies. 
But while she has the skills to live in solitude forever, the time comes when she yearns to be touched and loved. Drawn to two young men from town, who are each intrigued by her wild beauty, Kya opens herself to a new and startling world—until the unthinkable happens.`,
+  },
+];
+
+async function list(genre: string) {
+  return db.filter((item) => item.genre === genre).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function search(name: string) {
+  return db.filter((item) => item.name.includes(name)).map((item) => ({ name: item.name, id: item.id }));
+}
+
+async function get(id: string) {
+  return db.find((item) => item.id === id)!;
+}
+
+main();
diff --git a/examples/package.json b/examples/package.json
new file mode 100644
index 000000000..3b27b221f
--- /dev/null
+++ b/examples/package.json
@@ -0,0 +1,18 @@
+{
+  "name": "openai-examples",
+  "version": "1.0.0",
+  "description": "Usage examples for the OpenAI Node.js SDK.",
+  "main": "index.js",
+  "license": "MIT",
+  "private": true,
+  "dependencies": {
+    "express": "^4.18.2",
+    "next": "^13.5.5",
+    "openai": "file:..",
+    "zod-to-json-schema": "^3.21.4"
+  },
+  "devDependencies": {
+    "@types/body-parser": "^1.19.3",
+    "@types/express": "^4.17.19"
+  }
+}
diff --git a/examples/raw-response.ts b/examples/raw-response.ts
index e114f75d4..6b1df31e8 100644
--- a/examples/raw-response.ts
+++ b/examples/raw-response.ts
@@ -1,4 +1,4 @@
-#!/usr/bin/env yarn tsn -T
+#!/usr/bin/env -S yarn tsn -T
 
 import OpenAI from 'openai';
diff --git a/examples/stream-to-client-browser.ts b/examples/stream-to-client-browser.ts
new file mode 100755
index 000000000..4430ccc68
--- /dev/null
+++ b/examples/stream-to-client-browser.ts
@@ -0,0 +1,30 @@
+#!/usr/bin/env -S npm run tsn -T
+
+/**
+ * This file is intended to be run from the command line with Node
+ * for easy demo purposes, simulating use in the browser.
+ *
+ * To run it in a browser application, copy/paste it into a frontend application,
+ * remove the 'node-fetch' import, and replace `process.stdout.write` with
+ * a console.log or UI display.
+ */
+import fetch from 'node-fetch';
+import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';
+
+fetch('http://localhost:3000', {
+  method: 'POST',
+  body: 'Tell me why dogs are better than cats',
+  headers: { 'Content-Type': 'text/plain' },
+}).then(async (res) => {
+  // @ts-ignore ReadableStream on different environments can be strange
+  const runner = ChatCompletionStream.fromReadableStream(res.body);
+
+  runner.on('content', (delta, snapshot) => {
+    process.stdout.write(delta);
+    // or, in a browser, you might display like this:
+    // document.body.innerText += delta; // or:
+    // document.body.innerText = snapshot;
+  });
+
+  console.dir(await runner.finalChatCompletion(), { depth: null });
+});
diff --git a/examples/stream-to-client-express.ts b/examples/stream-to-client-express.ts
new file mode 100755
index 000000000..f688f42e7
--- /dev/null
+++ b/examples/stream-to-client-express.ts
@@ -0,0 +1,52 @@
+#!/usr/bin/env -S npm run tsn -T
+
+// This file demonstrates how to stream the chunks from the server as
+// a new-line separated JSON-encoded stream.
+
+import OpenAI from 'openai';
+import express, { Request, Response } from 'express';
+
+const openai = new OpenAI();
+const app = express();
+
+app.use(express.text());
+
+// This endpoint can be called with:
+//
+//   curl 127.0.0.1:3000 -N -X POST -H 'Content-Type: text/plain' \
+//     --data 'Can you explain why dogs are better than cats?'
+// +// Or consumed with fetch: +// +// fetch('http://localhost:3000', { +// method: 'POST', +// body: 'Tell me why dogs are better than cats', +// }).then(async res => { +// const runner = ChatCompletionStreamingRunner.fromReadableStream(res) +// }) +// +// See examples/stream-to-client-browser.ts for a more complete example. +app.post('/', async (req: Request, res: Response) => { + try { + console.log('Received request:', req.body); + + const stream = openai.beta.chat.completions.stream({ + model: 'gpt-3.5-turbo', + stream: true, + messages: [{ role: 'user', content: req.body }], + }); + + res.header('Content-Type', 'text/plain'); + for await (const chunk of stream.toReadableStream()) { + res.write(chunk); + } + + res.end(); + } catch (e) { + console.error(e); + } +}); + +app.listen('3000', () => { + console.log('Started proxy express server'); +}); diff --git a/examples/stream-to-client-next.ts b/examples/stream-to-client-next.ts new file mode 100755 index 000000000..c5c1ff317 --- /dev/null +++ b/examples/stream-to-client-next.ts @@ -0,0 +1,38 @@ +import OpenAI from 'openai'; +import type { NextApiRequest, NextApiResponse } from 'next'; + +// This file demonstrates how to stream from a Next.JS server as +// a new-line separated JSON-encoded stream. This file cannot be run +// without Next.JS scaffolding. + +export const runtime = 'edge'; + +// This endpoint can be called with: +// +// curl 127.0.0.1:3000 -N -X POST -H 'Content-Type: text/plain' \ +// --data 'Can you explain why dogs are better than cats?' +// +// Or consumed with fetch: +// +// fetch('http://localhost:3000', { +// method: 'POST', +// body: 'Tell me why dogs are better than cats', +// }).then(async res => { +// const runner = ChatCompletionStreamingRunner.fromReadableStream(res) +// }) +// +// See examples/stream-to-client-browser.ts for a more complete example. +export default async function handler(req: NextApiRequest, res: NextApiResponse) { + const openai = new OpenAI(); + + const stream = openai.beta.chat.completions.stream({ + model: 'gpt-3.5-turbo', + stream: true, + // @ts-ignore + messages: [{ role: 'user', content: await req.text() }], + }); + + return res.send(stream.toReadableStream()); + // @ts-ignore -- Or, for the app router: + return new Response(stream.toReadableStream()); +} diff --git a/examples/stream-to-client-raw.ts b/examples/stream-to-client-raw.ts new file mode 100755 index 000000000..4362f2dff --- /dev/null +++ b/examples/stream-to-client-raw.ts @@ -0,0 +1,57 @@ +#!/usr/bin/env -S npm run tsn -T + +// This file demonstrates how to stream from the server as a text/plain +// response with express and the stream async iterator. + +import OpenAI from 'openai'; +import express, { Request, Response } from 'express'; + +const openai = new OpenAI(); +const app = express(); + +app.use(express.text()); + +// This endpoint can be called with: +// +// curl 127.0.0.1:3000 -N -X POST -H 'Content-Type: text/plain' \ +// --data 'Can you explain why dogs are better than cats?' 
+//
+// Or consumed with fetch:
+//
+//   fetch('http://localhost:3000', {
+//     method: 'POST',
+//     body: 'Tell me why dogs are better than cats',
+//   }).then(async res => {
+//     const decoder = new TextDecoder();
+//     for await (const chunk of res.body) {
+//       console.log(`chunk: ${decoder.decode(chunk)}`);
+//     }
+//   })
+//
+app.post('/', async (req: Request, res: Response) => {
+  try {
+    console.log('Received request:', req.body);
+
+    const stream = await openai.chat.completions.create({
+      model: 'gpt-3.5-turbo',
+      stream: true,
+      messages: [{ role: 'user', content: req.body }],
+    });
+
+    res.header('Content-Type', 'text/plain');
+
+    // Sends each content stream chunk-by-chunk, such that the client
+    // ultimately receives a single string.
+    for await (const chunk of stream) {
+      res.write(chunk.choices[0]?.delta.content || '');
+    }
+
+    res.end();
+  } catch (e) {
+    console.error(e);
+  }
+});
+
+app.listen('3000', () => {
+  console.log('Started proxy express server');
+});
diff --git a/examples/stream.ts b/examples/stream.ts
new file mode 100644
index 000000000..f3b712e8e
--- /dev/null
+++ b/examples/stream.ts
@@ -0,0 +1,24 @@
+#!/usr/bin/env -S npm run tsn -T
+
+import OpenAI from 'openai';
+
+const openai = new OpenAI();
+
+async function main() {
+  const runner = await openai.beta.chat.completions
+    .stream({
+      model: 'gpt-3.5-turbo',
+      messages: [{ role: 'user', content: 'Say this is a test' }],
+    })
+    .on('message', (msg) => console.log(msg))
+    .on('content', (diff) => process.stdout.write(diff));
+
+  for await (const chunk of runner) {
+    console.log('chunk', chunk);
+  }
+
+  const result = await runner.finalChatCompletion();
+  console.log(result);
+}
+
+main();
diff --git a/examples/tsconfig.json b/examples/tsconfig.json
new file mode 100644
index 000000000..6c3477462
--- /dev/null
+++ b/examples/tsconfig.json
@@ -0,0 +1,3 @@
+{
+  "extends": "../tsconfig.json"
+}
diff --git a/helpers.md b/helpers.md
new file mode 100644
index 000000000..1ae25ef82
--- /dev/null
+++ b/helpers.md
@@ -0,0 +1,289 @@
+# Chat Completion Helpers
+
+## Streaming Responses
+
+```ts
+openai.beta.chat.completions.stream({ stream?: false, … }, options?): ChatCompletionStream
+```
+
+`openai.beta.chat.completions.stream()` returns a `ChatCompletionStream`, which emits events, has an async
+iterator, and exposes helper methods that accumulate chunks into a convenient shape and make it easy to reason
+about the conversation.
+
+Alternatively, you can use `openai.chat.completions.create({ stream: true, … })` which returns an async
+iterable of the chunks in the stream and uses less memory (most notably, it does not accumulate a final chat
+completion object for you).
+
+If you need to cancel a stream, you can `break` from a `for await` loop or call `stream.abort()`.
+
+See an example of streaming helpers in action in [`examples/stream.ts`](examples/stream.ts).
+
+## Automated Function Calls
+
+```ts
+openai.beta.chat.completions.runFunctions({ stream: false, … }, options?): ChatCompletionRunner
+openai.beta.chat.completions.runFunctions({ stream: true, … }, options?): ChatCompletionStreamingRunner
+```
+
+`openai.beta.chat.completions.runFunctions()` returns a Runner (a `ChatCompletionRunner`, or a
+`ChatCompletionStreamingRunner` when `stream: true` is passed) for automating function calls with chat
+completions. The runner automatically calls the JavaScript functions you provide and sends their results back
+to the API, looping as long as the model requests function calls.
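+
+Conceptually, the runner automates a loop you could write by hand. The sketch below is illustrative only, not
+the library's actual implementation; it assumes `client`, `messages`, and `functions` are in scope, and
+`callFunction` is a hypothetical dispatcher that invokes the matching JavaScript function (see
+[`examples/function-call-diy.ts`](examples/function-call-diy.ts) for a complete version):
+
+```ts
+while (true) {
+  // Ask the model, advertising the available functions.
+  const completion = await client.chat.completions.create({ model: 'gpt-3.5-turbo', messages, functions });
+  const message = completion.choices[0]!.message;
+  messages.push(message);
+
+  // No function call means the model has produced its final answer.
+  if (!message.function_call) break;
+
+  // Run the requested function and feed the result back as a `function` message.
+  const result = await callFunction(message.function_call);
+  messages.push({ role: 'function', name: message.function_call.name, content: JSON.stringify(result) });
+}
+```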
+
+If you pass a `parse` function, it will automatically parse the `arguments` for you and return any parsing
+errors to the model to attempt auto-recovery. Otherwise, the args will be passed to the function you provide
+as a string.
+
+```ts
+client.beta.chat.completions.runFunctions({
+  model: 'gpt-3.5-turbo',
+  messages: [{ role: 'user', content: "How's the weather this week?" }],
+  functions: [{
+    function: getWeather as (args: { location: string, time: Date }) => any,
+    parse: parseFunction as (args: string) => { location: string, time: Date },
+    parameters: {
+      type: 'object',
+      properties: {
+        location: { type: 'string' },
+        time: { type: 'string', format: 'date-time' },
+      },
+    },
+  }],
+});
+```
+
+If you pass `function_call: {name: …}` instead of `auto`, it returns immediately after calling that
+function (and only loops to auto-recover parsing errors).
+
+By default, we run the loop up to five chat completions from the API. You can change this behavior by
+adjusting `maxChatCompletions` in the request options object. Note that `max_tokens` is the limit per
+chat completion request, not for the entire `runFunctions` call.
+
+See an example of automated function calls in action in
+[`examples/function-call-helpers.ts`](examples/function-call-helpers.ts).
+
+## Runner API
+
+### Events
+
+#### `.on('connect', () => …)`
+
+The first event that is fired when the connection with the OpenAI API is established.
+
+#### `.on('chunk', (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => …)` (with `stream`)
+
+The event fired when a chunk is received from the API. Not fired when it is not streaming. The snapshot
+returns an accumulated `ChatCompletionSnapshot`, which has a similar shape to `ChatCompletion` with optional
+fields and is built up from the chunks.
+
+#### `.on('chatCompletion', (completion: ChatCompletion) => …)`
+
+The event fired when a chat completion is returned or done being streamed by the API.
+
+#### `.on('message', (message: ChatCompletionMessage | ChatCompletionMessageParam) => …)`
+
+The event fired when a new message is either sent or received from the API. Does not fire for the messages
+sent as the parameter to either `.runFunctions()` or `.stream()`.
+
+#### `.on('content', (content: string) => …)` (without `stream`)
+
+The event fired when a message from the `assistant` is received from the API.
+
+#### `.on('content', (delta: string, snapshot: string) => …)` (with `stream`)
+
+The event fired when a chunk from the `assistant` is received from the API. The `delta` argument contains the
+content of the chunk, while the `snapshot` returns the accumulated content for the current message.
+
+#### `.on('functionCall', (functionCall: ChatCompletionMessage.FunctionCall) => …)`
+
+The event fired when a function call is made by the assistant.
+
+#### `.on('functionCallResult', (content: string) => …)`
+
+The event fired when the function runner responds to the function call with `role: "function"`. The `content` of the
+response is given as the first argument to the callback.
+
+#### `.on('finalChatCompletion', (completion: ChatCompletion) => …)`
+
+The event fired for the final chat completion. If the function-call runner exceeds
+`maxChatCompletions`, then the last chat completion is given.
+
+#### `.on('finalContent', (contentSnapshot: string) => …)`
+
+The event fired for the `content` of the last `role: "assistant"` message. Not fired if there is no `assistant`
+message.
+
+#### `.on('finalMessage', (message: ChatCompletionMessage | ChatCompletionMessageParam) => …)`
+
+The event fired for the last message.
+
+#### `.on('finalFunctionCall', (functionCall: ChatCompletionMessage.FunctionCall) => …)`
+
+The event fired for the last message with a defined `function_call`.
+
+#### `.on('finalFunctionCallResult', (content: string) => …)`
+
+The event fired for the last message with a `role: "function"`.
+
+#### `.on('error', (error: OpenAIError) => …)`
+
+The event fired when an error is encountered outside of a `parse` function or an abort.
+
+#### `.on('abort', (error: APIUserAbortError) => …)`
+
+The event fired when the stream receives a signal to abort.
+
+#### `.on('totalUsage', (usage: CompletionUsage) => …)` (without `stream`, usage is not currently reported with `stream`)
+
+The event fired at the end, returning the total usage of the call.
+
+#### `.on('end', () => …)`
+
+The last event fired in the stream.
+
+### Methods
+
+#### `.abort()`
+
+Aborts the runner and the streaming request, equivalent to `.controller.abort()`. Calling `.abort()` on a
+`ChatCompletionStreamingRunner` will also abort any in-flight network requests.
+
+#### `await .done()`
+
+An empty promise which resolves when the stream is done.
+
+#### `await .finalChatCompletion()`
+
+A promise which resolves with the final chat completion that was received from the API. Throws if the request
+ends before a complete chat completion is returned.
+
+#### `await .allChatCompletions()`
+
+A promise which resolves with the array of all chat completions that were received from the API.
+
+#### `await .finalContent()`
+
+A promise which resolves with the `content` of the last `role: "assistant"` message. Throws if no such message
+can be found.
+
+#### `await .finalMessage()`
+
+A promise which resolves with the last message.
+
+#### `await .finalFunctionCall()`
+
+A promise which resolves with the last message with a defined `function_call`. Throws if no such message is
+found.
+
+#### `await .finalFunctionCallResult()`
+
+A promise which resolves with the last message with a `role: "function"`. Throws if no such message is found.
+
+#### `await .totalUsage()` (without `stream`, usage is not currently reported with `stream`)
+
+A promise which resolves with the total usage.
+
+### Fields
+
+#### `.messages`
+
+A mutable array of all messages in the conversation.
+
+#### `.controller`
+
+The underlying `AbortController` for the runner.
+
+## Examples
+
+### Abort on a function call
+
+If you have a function call flow which you intend to _end_ with a certain function call, then you can use the second
+argument `runner` given to the function to either mutate `runner.messages` or call `runner.abort()`.
+
+```ts
+import OpenAI from 'openai';
+
+const client = new OpenAI();
+
+async function main() {
+  const runner = client.beta.chat.completions
+    .runFunctions({
+      model: 'gpt-3.5-turbo',
+      messages: [{ role: 'user', content: "How's the weather this week in Los Angeles?" }],
+      functions: [
+        {
+          function: function queryDatabase(props) { … },
+          …
+        },
+        {
+          function: function updateDatabase(props, runner) {
+            runner.abort()
+          },
+          …
+        },
+      ],
+    })
+    .on('message', (message) => console.log(message));
+
+  const finalFunctionCall = await runner.finalFunctionCall();
+  console.log('Final function call:', finalFunctionCall);
+}
+
+main();
+```
+
+### Integrate with `zod`
+
+[`zod`](https://www.npmjs.com/package/zod) is a schema validation library which can help with validating the
+assistant's response to make sure it conforms to a schema. Paired with [`zod-to-json-schema`](https://www.npmjs.com/package/zod-to-json-schema), the validation schema also acts as the `parameters` JSON Schema passed to the API.
+
+```ts
+import OpenAI from 'openai';
+import { z } from 'zod';
+import { zodToJsonSchema } from 'zod-to-json-schema';
+
+const client = new OpenAI();
+
+async function main() {
+  const runner = client.beta.chat.completions
+    .runFunctions({
+      model: 'gpt-3.5-turbo',
+      messages: [{ role: 'user', content: "How's the weather this week in Los Angeles?" }],
+      functions: [
+        {
+          function: getWeather,
+          parse: GetWeatherParameters.parse,
+          parameters: zodToJsonSchema(GetWeatherParameters),
+        },
+      ],
+    })
+    .on('message', (message) => console.log(message));
+
+  const finalContent = await runner.finalContent();
+  console.log('Final content:', finalContent);
+}
+
+const GetWeatherParameters = z.object({
+  location: z.enum(['Boston', 'New York City', 'Los Angeles', 'San Francisco']),
+});
+
+async function getWeather(args: z.infer<typeof GetWeatherParameters>) {
+  const { location } = args;
+  // … do lookup …
+  return { temperature, precipitation };
+}
+
+main();
+```
+
+See a more fully-fledged example in [`examples/function-call-helpers-zod.ts`](examples/function-call-helpers-zod.ts).
+
+### Integrate with Next.JS
+
+See an example of a Next.JS integration in [`examples/stream-to-client-next.ts`](examples/stream-to-client-next.ts).
+
+### Proxy Streaming to a Browser
+
+See an example of using express to stream to a browser in [`examples/stream-to-client-express.ts`](examples/stream-to-client-express.ts).
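+
+For reference, the browser side of that express proxy might look roughly like the sketch below (a sketch only:
+it assumes the express server above is listening on `http://localhost:3000` and that you display the text by
+appending to `document.body`; see
+[`examples/stream-to-client-browser.ts`](examples/stream-to-client-browser.ts) for a complete version):
+
+```ts
+import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';
+
+const res = await fetch('http://localhost:3000', {
+  method: 'POST',
+  body: 'Tell me why dogs are better than cats',
+  headers: { 'Content-Type': 'text/plain' },
+});
+
+// Rebuild a ChatCompletionStream from the proxied new-line separated JSON chunks.
+const runner = ChatCompletionStream.fromReadableStream(res.body!);
+
+runner.on('content', (delta) => {
+  document.body.innerText += delta; // append each content delta as it arrives
+});
+
+console.log(await runner.finalChatCompletion());
+```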
+
diff --git a/src/index.ts b/src/index.ts
index 4c6d0ba7a..35587dda3 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -148,6 +148,7 @@ export class OpenAI extends Core.APIClient {
   models: API.Models = new API.Models(this);
   fineTuning: API.FineTuning = new API.FineTuning(this);
   fineTunes: API.FineTunes = new API.FineTunes(this);
+  beta: API.Beta = new API.Beta(this);
 
   protected override defaultQuery(): Core.DefaultQuery | undefined {
     return this._options.defaultQuery;
@@ -279,6 +280,8 @@ export namespace OpenAI {
   export import FineTuneListEventsParams = API.FineTuneListEventsParams;
   export import FineTuneListEventsParamsNonStreaming = API.FineTuneListEventsParamsNonStreaming;
   export import FineTuneListEventsParamsStreaming = API.FineTuneListEventsParamsStreaming;
+
+  export import Beta = API.Beta;
 }
 
 export default OpenAI;
diff --git a/src/lib/AbstractChatCompletionRunner.ts b/src/lib/AbstractChatCompletionRunner.ts
new file mode 100644
index 000000000..c8ee555a3
--- /dev/null
+++ b/src/lib/AbstractChatCompletionRunner.ts
@@ -0,0 +1,488 @@
+import * as Core from 'openai/core';
+import { type CompletionUsage } from 'openai/resources/completions';
+import {
+  type Completions,
+  type ChatCompletion,
+  type ChatCompletionMessage,
+  type ChatCompletionMessageParam,
+  type ChatCompletionCreateParams,
+} from 'openai/resources/chat/completions';
+import { APIUserAbortError, OpenAIError } from 'openai/error';
+import {
+  type RunnableFunction,
+  isRunnableFunctionWithParse,
+  type BaseFunctionsArgs,
+} from './RunnableFunction';
+import { ChatCompletionFunctionRunnerParams } from './ChatCompletionRunner';
+import { ChatCompletionStreamingFunctionRunnerParams } from './ChatCompletionStreamingRunner';
+
+export abstract class AbstractChatCompletionRunner<
+  Events extends CustomEvents<any> = AbstractChatCompletionRunnerEvents,
+> {
+  controller: AbortController = new AbortController();
+
+  #connectedPromise: Promise<void>;
+  #resolveConnectedPromise: () => void = () => {};
+  #rejectConnectedPromise: (error: OpenAIError) => void = () => {};
+
+  #endPromise: Promise<void>;
+  #resolveEndPromise: () => void = () => {};
+  #rejectEndPromise: (error: OpenAIError) => void = () => {};
+
+  #listeners: { [Event in keyof Events]?: ListenersForEvent<Events, Event> } = {};
+
+  protected _chatCompletions: ChatCompletion[] = [];
+  messages: (ChatCompletionMessage | ChatCompletionMessageParam)[] = [];
+
+  #ended = false;
+  #errored = false;
+  #aborted = false;
+  #catchingPromiseCreated = false;
+
+  constructor() {
+    this.#connectedPromise = new Promise<void>((resolve, reject) => {
+      this.#resolveConnectedPromise = resolve;
+      this.#rejectConnectedPromise = reject;
+    });
+
+    this.#endPromise = new Promise<void>((resolve, reject) => {
+      this.#resolveEndPromise = resolve;
+      this.#rejectEndPromise = reject;
+    });
+
+    // Don't let these promises cause unhandled rejection errors.
+    // we will manually cause an unhandled rejection error later
+    // if the user hasn't registered any error listener or called
+    // any promise-returning method.
+    this.#connectedPromise.catch(() => {});
+    this.#endPromise.catch(() => {});
+  }
+
+  protected _run(executor: () => Promise<any>) {
+    // Unfortunately if we call `executor()` immediately we get runtime errors about
+    // references to `this` before the `super()` constructor call returns.
+    setTimeout(() => {
+      executor().then(() => {
+        this._emitFinal();
+        this._emit('end');
+      }, this.#handleError);
+    }, 0);
+  }
+
+  protected _addChatCompletion(chatCompletion: ChatCompletion): ChatCompletion {
+    this._chatCompletions.push(chatCompletion);
+    this._emit('chatCompletion', chatCompletion);
+    const message = chatCompletion.choices[0]?.message;
+    if (message) this._addMessage(message);
+    return chatCompletion;
+  }
+
+  protected _addMessage(message: ChatCompletionMessage | ChatCompletionMessageParam, emit = true) {
+    this.messages.push(message);
+    if (emit) {
+      this._emit('message', message);
+      if (message.role === 'function' && message.content) {
+        this._emit('functionCallResult', message.content);
+      } else if (message.function_call) {
+        this._emit('functionCall', message.function_call);
+      }
+    }
+  }
+
+  protected _connected() {
+    if (this.ended) return;
+    this.#resolveConnectedPromise();
+    this._emit('connect');
+  }
+
+  get ended(): boolean {
+    return this.#ended;
+  }
+
+  get errored(): boolean {
+    return this.#errored;
+  }
+
+  get aborted(): boolean {
+    return this.#aborted;
+  }
+
+  abort() {
+    this.controller.abort();
+  }
+
+  /**
+   * Adds the listener function to the end of the listeners array for the event.
+   * No checks are made to see if the listener has already been added. Multiple calls passing
+   * the same combination of event and listener will result in the listener being added, and
+   * called, multiple times.
+   * @returns this ChatCompletionStream, so that calls can be chained
+   */
+  on<Event extends keyof Events>(event: Event, listener: ListenerForEvent<Events, Event>): this {
+    const listeners: ListenersForEvent<Events, Event> =
+      this.#listeners[event] || (this.#listeners[event] = []);
+    listeners.push({ listener });
+    return this;
+  }
+
+  /**
+   * Removes the specified listener from the listener array for the event.
+   * off() will remove, at most, one instance of a listener from the listener array. If any single
+   * listener has been added multiple times to the listener array for the specified event, then
+   * off() must be called multiple times to remove each instance.
+   * @returns this ChatCompletionStream, so that calls can be chained
+   */
+  off<Event extends keyof Events>(event: Event, listener: ListenerForEvent<Events, Event>): this {
+    const listeners = this.#listeners[event];
+    if (!listeners) return this;
+    const index = listeners.findIndex((l) => l.listener === listener);
+    if (index >= 0) listeners.splice(index, 1);
+    return this;
+  }
+
+  /**
+   * Adds a one-time listener function for the event. The next time the event is triggered,
+   * this listener is removed and then invoked.
+   * @returns this ChatCompletionStream, so that calls can be chained
+   */
+  once<Event extends keyof Events>(event: Event, listener: ListenerForEvent<Events, Event>): this {
+    const listeners: ListenersForEvent<Events, Event> =
+      this.#listeners[event] || (this.#listeners[event] = []);
+    listeners.push({ listener, once: true });
+    return this;
+  }
+
+  /**
+   * This is similar to `.once()`, but returns a Promise that resolves the next time
+   * the event is triggered, instead of calling a listener callback.
+   * @returns a Promise that resolves the next time given event is triggered,
+   * or rejects if an error is emitted. (If you request the 'error' event,
+   * returns a promise that resolves with the error).
+   *
+   * Example:
+   *
+   *   const message = await stream.emitted('message') // rejects if the stream errors
+   */
+  emitted<Event extends keyof Events>(
+    event: Event,
+  ): Promise<
+    EventParameters<Events, Event> extends [infer Param] ? Param
+    : EventParameters<Events, Event> extends [] ? void
+    : EventParameters<Events, Event>
+  > {
+    return new Promise((resolve, reject) => {
+      this.#catchingPromiseCreated = true;
+      if (event !== 'error') this.once('error', reject);
+      this.once(event, resolve as any);
+    });
+  }
+
+  async done(): Promise<void> {
+    this.#catchingPromiseCreated = true;
+    await this.#endPromise;
+  }
+
+  /**
+   * @returns a promise that resolves with the final ChatCompletion, or rejects
+   * if an error occurred or the stream ended prematurely without producing a ChatCompletion.
+   */
+  async finalChatCompletion(): Promise<ChatCompletion> {
+    await this.done();
+    const completion = this._chatCompletions[this._chatCompletions.length - 1];
+    if (!completion) throw new OpenAIError('stream ended without producing a ChatCompletion');
+    return completion;
+  }
+
+  #getFinalContent(): string | null {
+    for (let i = this.messages.length - 1; i >= 0; i--) {
+      const message = this.messages[i];
+      if (message?.role === 'assistant') return message.content;
+    }
+    return null;
+  }
+
+  /**
+   * @returns a promise that resolves with the content of the final ChatCompletionMessage, or rejects
+   * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
+   */
+  async finalContent(): Promise<string | null> {
+    await this.done();
+    return this.#getFinalContent();
+  }
+
+  /**
+   * @returns a promise that resolves with the final ChatCompletionMessage, or rejects
+   * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
+   */
+  async finalMessage(): Promise<ChatCompletionMessage | ChatCompletionMessageParam> {
+    await this.done();
+    const message = this.messages[this.messages.length - 1];
+    if (!message) throw new OpenAIError('stream ended without producing a ChatCompletionMessage');
+    return message;
+  }
+
+  #getFinalFunctionCall(): ChatCompletionMessage.FunctionCall | undefined {
+    for (let i = this.messages.length - 1; i >= 0; i--) {
+      const message = this.messages[i];
+      if (message?.function_call) return message.function_call;
+    }
+  }
+
+  /**
+   * @returns a promise that resolves with the content of the final FunctionCall, or rejects
+   * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
+   */
+  async finalFunctionCall(): Promise<ChatCompletionMessage.FunctionCall | undefined> {
+    await this.done();
+    return this.#getFinalFunctionCall();
+  }
+
+  #getFinalFunctionCallResult(): string | undefined {
+    for (let i = this.messages.length - 1; i >= 0; i--) {
+      const message = this.messages[i];
+      if (message?.role === 'function' && message.content != null) return message.content;
+    }
+  }
+
+  async finalFunctionCallResult(): Promise<string | undefined> {
+    await this.done();
+    return this.#getFinalFunctionCallResult();
+  }
+
+  #calculateTotalUsage(): CompletionUsage {
+    const total: CompletionUsage = {
+      completion_tokens: 0,
+      prompt_tokens: 0,
+      total_tokens: 0,
+    };
+    for (const { usage } of this._chatCompletions) {
+      if (usage) {
+        total.completion_tokens += usage.completion_tokens;
+        total.prompt_tokens += usage.prompt_tokens;
+        total.total_tokens += usage.total_tokens;
+      }
+    }
+    return total;
+  }
+
+  async totalUsage(): Promise<CompletionUsage> {
+    await this.done();
+    return this.#calculateTotalUsage();
+  }
+
+  allChatCompletions(): ChatCompletion[] {
+    return [...this._chatCompletions];
+  }
+
+  #handleError = (error: unknown) => {
+    this.#errored = true;
+    if (error instanceof Error && error.name === 'AbortError') {
+      error = new APIUserAbortError();
+    }
+    if (error instanceof APIUserAbortError) {
+      this.#aborted = true;
+      this._emit('abort', error);
+    }
+    const openAIError: OpenAIError =
+      error instanceof OpenAIError ? error : (
+        new OpenAIError(error instanceof Error ? error.message : String(error))
+      );
+
+  protected _emit<Event extends keyof Events>(event: Event, ...args: EventParameters<Events, Event>) {
+    // make sure we don't emit any events after end
+    if (this.#ended) return;
+
+    if (event === 'end') {
+      this.#ended = true;
+      this.#resolveEndPromise();
+    }
+
+    const listeners: ListenersForEvent<Events, Event> | undefined = this.#listeners[event];
+    if (listeners) {
+      this.#listeners[event] = listeners.filter((l) => !l.once) as any;
+      listeners.forEach(({ listener }: any) => listener(...args));
+    }
+
+    if (event === 'error') {
+      // NOTE: _emit('error', error) should only be called from #handleError().
+
+      const error = args[0] as OpenAIError;
+      if (!this.#catchingPromiseCreated && !listeners?.length) {
+        // Trigger an unhandled rejection if the user hasn't registered any error handlers.
+        // If you are seeing stack traces here, make sure to handle errors via either:
+        // - runner.on('error', () => ...)
+        // - await runner.done()
+        // - await runner.finalChatCompletion()
+        // - etc.
+        Promise.reject(error);
+      }
+      this.#rejectConnectedPromise(error);
+      this.#rejectEndPromise(error);
+      this._emit('end');
+    }
+  }
+
+  protected _emitFinal() {
+    const completion = this._chatCompletions[this._chatCompletions.length - 1];
+    if (completion) this._emit('finalChatCompletion', completion);
+    const finalMessage = this.messages[this.messages.length - 1];
+    if (finalMessage) this._emit('finalMessage', finalMessage);
+    const finalContent = this.#getFinalContent();
+    if (finalContent) this._emit('finalContent', finalContent);
+
+    const finalFunctionCall = this.#getFinalFunctionCall();
+    if (finalFunctionCall) this._emit('finalFunctionCall', finalFunctionCall);
+
+    const finalFunctionCallResult = this.#getFinalFunctionCallResult();
+    if (finalFunctionCallResult != null) this._emit('finalFunctionCallResult', finalFunctionCallResult);
+
+    if (this._chatCompletions.some((c) => c.usage)) {
+      this._emit('totalUsage', this.#calculateTotalUsage());
+    }
+  }
+
+  protected async _createChatCompletion(
+    completions: Completions,
+    params: ChatCompletionCreateParams,
+    options?: Core.RequestOptions,
+  ): Promise<ChatCompletion> {
+    const signal = options?.signal;
+    if (signal) {
+      if (signal.aborted) this.controller.abort();
+      signal.addEventListener('abort', () => this.controller.abort());
+    }
+    const chatCompletion = await completions.create(
+      { ...params, stream: false },
+      { ...options, signal: this.controller.signal },
+    );
+    this._connected();
+    return this._addChatCompletion(chatCompletion);
+  }
+
+  protected async _runChatCompletion(
+    completions: Completions,
+    params: ChatCompletionCreateParams,
+    options?: Core.RequestOptions,
+  ): Promise<ChatCompletion> {
+    for (const message of params.messages) {
+      this._addMessage(message, false);
+    }
+    return await this._createChatCompletion(completions, params, options);
+  }
+
+  protected async _runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
+    completions: Completions,
+    params:
+      | ChatCompletionFunctionRunnerParams<FunctionsArgs>
+      | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions & { maxChatCompletions?: number },
+  ) {
+    const { function_call = 'auto', stream, ...restParams } = params;
+    const isSingleFunctionCall = typeof function_call !== 'string' && function_call?.name;
+
+    const functionsByName: Record<string, RunnableFunction<any>> = {};
+    for (const f of params.functions) {
+      functionsByName[f.name || f.function.name] = f;
+    }
+
+    const functions: ChatCompletionCreateParams.Function[] = params.functions.map(
+      (f): ChatCompletionCreateParams.Function => ({
+        name: f.name || f.function.name,
+        parameters: f.parameters as Record<string, unknown>,
+        description: f.description,
+      }),
+    );
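+
+    // Seed the conversation with the caller's messages without re-emitting
+    // them as 'message' events.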
+    for (const message of params.messages) {
+      this._addMessage(message, false);
+    }
+
+    for (let i = 0; i < (options?.maxChatCompletions ?? 5); ++i) {
+      const chatCompletion: ChatCompletion = await this._createChatCompletion(
+        completions,
+        {
+          ...restParams,
+          function_call,
+          functions,
+          messages: [...this.messages],
+        },
+        options,
+      );
+      const message = chatCompletion.choices[0]?.message;
+      if (!message) {
+        throw new OpenAIError(`missing message in ChatCompletion response`);
+      }
+      if (!message.function_call) return;
+      const { name, arguments: args } = message.function_call;
+      const fn = functionsByName[name];
+      if (!fn || (typeof function_call !== 'string' && name !== function_call?.name)) {
+        this._addMessage({
+          role: 'function',
+          name,
+          content: `Invalid function_call: ${JSON.stringify(name)}. Available options are: ${functions
+            .map((f) => JSON.stringify(f.name))
+            .join(', ')}. Please try again`,
+        });
+        if (isSingleFunctionCall) return;
+        continue;
+      }
+      let parsed;
+      try {
+        parsed = isRunnableFunctionWithParse(fn) ? await fn.parse(args) : args;
+      } catch (error) {
+        this._addMessage({
+          role: 'function',
+          name,
+          content: error instanceof Error ? error.message : String(error),
+        });
+        continue;
+      }
+      const rawContent = await (fn.function as any)(parsed as any, this);
+      const content =
+        typeof rawContent === 'string' ? rawContent
+        : rawContent === undefined ? 'undefined'
+        : JSON.stringify(rawContent);
+      this._addMessage({ role: 'function', name, content });
+
+      if (isSingleFunctionCall) return;
+    }
+  }
+}
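For orientation, here is a rough sketch of how the runner's event and promise helpers compose from the caller's side. This is not part of the patch: it assumes a configured client, elides the `functions` list, and uses only methods defined on the class above.

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const runner = client.beta.chat.completions.runFunctions({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'How is the weather?' }],
    functions: [/* … */],
  });

  // Listeners registered with on()/once() fire as the run loop progresses.
  runner.on('functionCall', (functionCall) => console.log('calling', functionCall.name));

  // emitted() resolves with the next occurrence of an event (and rejects if
  // 'error' fires first), while the final* helpers wait for 'end' via done().
  const firstMessage = await runner.emitted('message');
  console.log('first message:', firstMessage);
  console.log('final content:', await runner.finalContent());
  console.log('total usage:', await runner.totalUsage());
}

main();
```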
+
+type CustomEvents<Event extends string> = {
+  [k in Event]: k extends keyof AbstractChatCompletionRunnerEvents ? AbstractChatCompletionRunnerEvents[k]
+  : (...args: any[]) => void;
+};
+
+type ListenerForEvent<Events extends CustomEvents<any>, Event extends keyof Events> =
+  Event extends keyof AbstractChatCompletionRunnerEvents ? AbstractChatCompletionRunnerEvents[Event]
+  : Events[Event];
+
+type ListenersForEvent<Events extends CustomEvents<any>, Event extends keyof Events> = Array<{
+  listener: ListenerForEvent<Events, Event>;
+  once?: boolean;
+}>;
+type EventParameters<Events extends CustomEvents<any>, Event extends keyof Events> = Parameters<
+  ListenerForEvent<Events, Event>
+>;
+
+export interface AbstractChatCompletionRunnerEvents {
+  connect: () => void;
+  functionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
+  message: (message: ChatCompletionMessage | ChatCompletionMessageParam) => void;
+  chatCompletion: (completion: ChatCompletion) => void;
+  finalContent: (contentSnapshot: string) => void;
+  finalMessage: (message: ChatCompletionMessage | ChatCompletionMessageParam) => void;
+  finalChatCompletion: (completion: ChatCompletion) => void;
+  finalFunctionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
+  functionCallResult: (content: string) => void;
+  finalFunctionCallResult: (content: string) => void;
+  error: (error: OpenAIError) => void;
+  abort: (error: APIUserAbortError) => void;
+  end: () => void;
+  totalUsage: (usage: CompletionUsage) => void;
+}
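To make the typed-event plumbing concrete, here is a small type-level sketch. It uses only the aliases defined above (which are internal to this file) and is illustrative rather than part of the patch.

```ts
// For events declared on AbstractChatCompletionRunnerEvents, ListenerForEvent
// resolves to the interface's own listener signature:
type MessageListener = ListenerForEvent<AbstractChatCompletionRunnerEvents, 'message'>;
//   => (message: ChatCompletionMessage | ChatCompletionMessageParam) => void

// EventParameters is that listener's parameter tuple; emitted('message')
// unwraps the single-element tuple and resolves with the message itself:
type MessageEventArgs = EventParameters<AbstractChatCompletionRunnerEvents, 'message'>;
//   => [message: ChatCompletionMessage | ChatCompletionMessageParam]
```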
diff --git a/src/lib/ChatCompletionRunFunctions.test.ts b/src/lib/ChatCompletionRunFunctions.test.ts
new file mode 100644
index 000000000..677f9513e
--- /dev/null
+++ b/src/lib/ChatCompletionRunFunctions.test.ts
@@ -0,0 +1,1987 @@
+import OpenAI from 'openai';
+import { OpenAIError } from 'openai/error';
+import { PassThrough } from 'stream';
+import {
+  ParsingFunction,
+  type ChatCompletionRunner,
+  type ChatCompletionFunctionRunnerParams,
+  ChatCompletionStreamingRunner,
+  type ChatCompletionStreamingFunctionRunnerParams,
+} from 'openai/resources/beta/chat/completions';
+
+import { type RequestInfo, type RequestInit } from 'openai/_shims/index';
+import { Response } from 'node-fetch';
+
+type Fetch = (req: string | RequestInfo, init?: RequestInit) => Promise<Response>;
+
+/**
+ * Creates a mock `fetch` function and a `handleRequest` function for intercepting `fetch` calls.
+ *
+ * You call `handleRequest` with a callback function that handles the next `fetch` call.
+ * It returns a Promise that:
+ * - waits for the next call to `fetch`
+ * - calls the callback with the `fetch` arguments
+ * - resolves `fetch` with the callback output
+ */
+function mockFetch(): { fetch: Fetch; handleRequest: (handle: Fetch) => Promise<void> } {
+  const fetchQueue: ((handler: typeof fetch) => void)[] = [];
+  const handlerQueue: Promise<typeof fetch>[] = [];
+
+  const enqueueHandler = () => {
+    handlerQueue.push(
+      new Promise((resolve) => {
+        fetchQueue.push((handle: typeof fetch) => {
+          enqueueHandler();
+          resolve(handle);
+        });
+      }),
+    );
+  };
+  enqueueHandler();
+
+  async function fetch(req: string | RequestInfo, init?: RequestInit): Promise<Response> {
+    const handler = await handlerQueue.shift();
+    if (!handler) throw new Error('expected handler to be defined');
+    const signal = init?.signal;
+    if (!signal) return await handler(req, init);
+    return await Promise.race([
+      handler(req, init),
+      new Promise<Response>((resolve, reject) => {
+        if (signal.aborted) {
+          // @ts-ignore does exist in Node
+          reject(new DOMException('The user aborted a request.', 'AbortError'));
+          return;
+        }
+        signal.addEventListener('abort', (e) => {
+          // @ts-ignore does exist in Node
+          reject(new DOMException('The user aborted a request.', 'AbortError'));
+        });
+      }),
+    ]);
+  }
+
+  function handleRequest(handle: typeof fetch): Promise<void> {
+    return new Promise((resolve) => {
+      fetchQueue.shift()?.(async (req, init) => {
+        try {
+          return await handle(req, init);
+        } finally {
+          resolve();
+        }
+      });
+    });
+  }
+
+  return { fetch, handleRequest };
+}
+
+// mockChatCompletionFetch is like mockFetch, but with a more convenient handleRequest for mocking
+// chat completion request/responses.
+function mockChatCompletionFetch() {
+  const { fetch, handleRequest: handleRawRequest } = mockFetch();
+
+  function handleRequest(
+    handler: (body: ChatCompletionFunctionRunnerParams<any[]>) => Promise<OpenAI.Chat.ChatCompletion>,
+  ): Promise<void> {
+    return handleRawRequest(async (req, init) => {
+      const rawBody = init?.body;
+      if (typeof rawBody !== 'string') throw new Error(`expected init.body to be a string`);
+      const body: ChatCompletionFunctionRunnerParams<any[]> = JSON.parse(rawBody);
+      return new Response(JSON.stringify(await handler(body)), {
+        headers: { 'Content-Type': 'application/json' },
+      });
+    });
+  }
+  return { fetch, handleRequest };
+}
+
+// mockStreamingChatCompletionFetch is like mockFetch, but with a more convenient handleRequest for
+// mocking streaming chat completion request/responses.
+function mockStreamingChatCompletionFetch() {
+  const { fetch, handleRequest: handleRawRequest } = mockFetch();
+
+  function handleRequest(
+    handler: (
+      body: ChatCompletionStreamingFunctionRunnerParams<any[]>,
+    ) => AsyncIterable<OpenAI.Chat.ChatCompletionChunk>,
+  ): Promise<void> {
+    return handleRawRequest(async (req, init) => {
+      const rawBody = init?.body;
+      if (typeof rawBody !== 'string') throw new Error(`expected init.body to be a string`);
+      const body: ChatCompletionStreamingFunctionRunnerParams<any[]> = JSON.parse(rawBody);
+      const stream = new PassThrough();
+      (async () => {
+        for await (const chunk of handler(body)) {
+          stream.write(`data: ${JSON.stringify(chunk)}\n\n`);
+        }
+        stream.end(`data: [DONE]\n\n`);
+      })();
+      return new Response(stream, {
+        headers: {
+          'Content-Type': 'text/event-stream',
+          'Transfer-Encoding': 'chunked',
+        },
+      });
+    });
+  }
+  return { fetch, handleRequest };
+}
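A condensed sketch of how these mocks drive a test, following the same pattern as the tests below (the completion fixture is abbreviated, and jest globals are assumed):

```ts
test('mock fetch sketch', async () => {
  const { fetch, handleRequest } = mockChatCompletionFetch();
  const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });

  const runner = openai.beta.chat.completions.runFunctions({
    messages: [{ role: 'user', content: 'hi' }],
    model: 'gpt-3.5-turbo',
    functions: [],
  });

  await Promise.all([
    // Serve exactly one canned, non-streaming completion for the next request.
    handleRequest(async (request) => ({
      id: '1',
      choices: [{ index: 0, finish_reason: 'stop', message: { role: 'assistant', content: 'hello' } }],
      created: Math.floor(Date.now() / 1000),
      model: 'gpt-3.5-turbo',
      object: 'chat.completion',
    })),
    runner.done(),
  ]);

  expect(await runner.finalContent()).toEqual('hello');
});
```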
+
+// contentChoiceDeltas returns an iterable which mocks the delta stream of a content choice by
+// splitting the given content into chunks separated by whitespace.
+function* contentChoiceDeltas(
+  content: string,
+  {
+    index = 0,
+    role = 'assistant',
+  }: { index?: number; role?: NonNullable<OpenAI.Chat.ChatCompletionChunk.Choice.Delta['role']> } = {},
+): Iterable<OpenAI.Chat.ChatCompletionChunk.Choice> {
+  const deltas = content.split(/\s+/g);
+  for (let i = 0; i < deltas.length; i++) {
+    yield {
+      index,
+      finish_reason: i === deltas.length - 1 ? 'stop' : null,
+      delta: {
+        role,
+        content: deltas[i] ? `${deltas[i]}${i === deltas.length - 1 ? '' : ' '}` : null,
+      },
+    };
+  }
+}
+
+// functionCallDeltas returns an iterable which mocks the delta stream of a functionCall by
+// splitting the arguments into chunks separated by whitespace.
+function* functionCallDeltas(
+  args: string,
+  {
+    index = 0,
+    name,
+    role = 'assistant',
+  }: {
+    name: string;
+    index?: number;
+    role?: NonNullable<OpenAI.Chat.ChatCompletionChunk.Choice.Delta['role']>;
+  },
+): Iterable<OpenAI.Chat.ChatCompletionChunk.Choice> {
+  const deltas = args.split(/\s+/g);
+  for (let i = 0; i < deltas.length; i++) {
+    yield {
+      index,
+      finish_reason: i === deltas.length - 1 ? 'function_call' : null,
+      delta: {
+        role,
+        function_call: {
+          arguments: `${deltas[i] || ''}${i === deltas.length - 1 ? '' : ' '}`,
+          ...(i === deltas.length - 1 ? { name } : null),
+        },
+      },
+    };
+  }
+}
+
+class RunnerListener {
+  readonly contents: string[] = [];
+  readonly messages: OpenAI.Chat.ChatCompletionMessage[] = [];
+  readonly chatCompletions: OpenAI.Chat.ChatCompletion[] = [];
+  readonly functionCalls: OpenAI.Chat.ChatCompletionMessage.FunctionCall[] = [];
+  readonly functionCallResults: string[] = [];
+  finalContent: string | null = null;
+  finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined;
+  finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined;
+  finalFunctionCall: OpenAI.Chat.ChatCompletionMessage.FunctionCall | undefined;
+  finalFunctionCallResult: string | undefined;
+  totalUsage: OpenAI.CompletionUsage | undefined;
+  error: OpenAIError | undefined;
+  gotConnect = false;
+  gotAbort = false;
+  gotEnd = false;
+
+  onceMessageCallCount = 0;
+
+  constructor(public runner: ChatCompletionRunner) {
+    runner
+      .on('connect', () => (this.gotConnect = true))
+      .on('content', (content) => this.contents.push(content))
+      .on('message', (message) => this.messages.push(message))
+      .on('chatCompletion', (completion) => this.chatCompletions.push(completion))
+      .on('functionCall', (functionCall) => this.functionCalls.push(functionCall))
+      .on('functionCallResult', (result) => this.functionCallResults.push(result))
+      .on('finalContent', (content) => (this.finalContent = content))
+      .on('finalMessage', (message) => (this.finalMessage = message))
+      .on('finalChatCompletion', (completion) => (this.finalChatCompletion = completion))
+      .on('finalFunctionCall', (functionCall) => (this.finalFunctionCall = functionCall))
+      .on('finalFunctionCallResult', (result) => (this.finalFunctionCallResult = result))
+      .on('totalUsage', (usage) => (this.totalUsage = usage))
+      .on('error', (error) => (this.error = error))
+      .on('abort', () => (this.gotAbort = true))
+      .on('end', () => (this.gotEnd = true))
+      .once('message', () => this.onceMessageCallCount++);
+  }
+
+  async sanityCheck({ error }: { error?: string } = {}) {
+    expect(this.onceMessageCallCount).toBeLessThanOrEqual(1);
+    expect(this.gotAbort).toEqual(this.runner.aborted);
+    if (this.runner.aborted) expect(this.runner.errored).toBe(true);
+    if (error) {
+      expect(this.error?.message).toEqual(error);
+      expect(this.runner.errored).toBe(true);
+      await expect(this.runner.finalChatCompletion()).rejects.toThrow(error);
+      await expect(this.runner.finalMessage()).rejects.toThrow(error);
+      await
expect(this.runner.finalContent()).rejects.toThrow(error); + await expect(this.runner.finalFunctionCall()).rejects.toThrow(error); + await expect(this.runner.finalFunctionCallResult()).rejects.toThrow(error); + await expect(this.runner.totalUsage()).rejects.toThrow(error); + await expect(this.runner.done()).rejects.toThrow(error); + } else { + expect(this.error).toBeUndefined(); + expect(this.runner.errored).toBe(false); + } + + if (!this.gotConnect) { + expect(this.contents).toEqual([]); + expect(this.messages).toEqual([]); + expect(this.chatCompletions).toEqual([]); + expect(this.functionCalls).toEqual([]); + expect(this.functionCallResults).toEqual([]); + expect(this.finalContent).toBeUndefined(); + expect(this.finalMessage).toBeUndefined(); + expect(this.finalChatCompletion).toBeUndefined(); + expect(this.finalFunctionCall).toBeUndefined(); + expect(this.finalFunctionCallResult).toBeUndefined(); + expect(this.totalUsage).toBeUndefined(); + expect(this.gotEnd).toBe(true); + return; + } + + if (error) return; + + const expectedContents = this.messages + .filter((m) => m.role === 'assistant') + .map((m) => m.content) + .filter(Boolean); + expect(this.contents).toEqual(expectedContents); + expect(this.finalMessage).toEqual(this.messages[this.messages.length - 1]); + expect(await this.runner.finalMessage()).toEqual(this.finalMessage); + expect(this.finalContent).toEqual(expectedContents[expectedContents.length - 1] ?? null); + expect(await this.runner.finalContent()).toEqual(this.finalContent); + expect(this.finalChatCompletion).toEqual(this.chatCompletions[this.chatCompletions.length - 1]); + expect(await this.runner.finalChatCompletion()).toEqual(this.finalChatCompletion); + expect(this.finalFunctionCall).toEqual(this.functionCalls[this.functionCalls.length - 1]); + expect(await this.runner.finalFunctionCall()).toEqual(this.finalFunctionCall); + expect(this.finalFunctionCallResult).toEqual( + this.functionCallResults[this.functionCallResults.length - 1], + ); + expect(await this.runner.finalFunctionCallResult()).toEqual(this.finalFunctionCallResult); + expect(this.chatCompletions).toEqual(this.runner.allChatCompletions()); + expect(this.messages).toEqual(this.runner.messages.slice(-this.messages.length)); + if (this.chatCompletions.some((c) => c.usage)) { + const totalUsage: OpenAI.CompletionUsage = { + completion_tokens: 0, + prompt_tokens: 0, + total_tokens: 0, + }; + for (const { usage } of this.chatCompletions) { + if (usage) { + totalUsage.completion_tokens += usage.completion_tokens; + totalUsage.prompt_tokens += usage.prompt_tokens; + totalUsage.total_tokens += usage.total_tokens; + } + } + expect(this.totalUsage).toEqual(totalUsage); + expect(await this.runner.totalUsage()).toEqual(totalUsage); + } + + expect(this.gotEnd).toBe(true); + } +} + +class StreamingRunnerListener { + readonly eventChunks: OpenAI.Chat.ChatCompletionChunk[] = []; + readonly eventContents: [string, string][] = []; + readonly eventMessages: OpenAI.Chat.ChatCompletionMessage[] = []; + readonly eventChatCompletions: OpenAI.Chat.ChatCompletion[] = []; + readonly eventFunctionCalls: OpenAI.Chat.ChatCompletionMessage.FunctionCall[] = []; + readonly eventFunctionCallResults: string[] = []; + + finalContent: string | null = null; + finalMessage: OpenAI.Chat.ChatCompletionMessage | undefined; + finalChatCompletion: OpenAI.Chat.ChatCompletion | undefined; + finalFunctionCall: OpenAI.Chat.ChatCompletionMessage.FunctionCall | undefined; + finalFunctionCallResult: string | undefined; + error: OpenAIError | undefined; + 
gotConnect = false; + gotEnd = false; + + constructor(public runner: ChatCompletionStreamingRunner) { + runner + .on('connect', () => (this.gotConnect = true)) + .on('chunk', (chunk) => this.eventChunks.push(chunk)) + .on('content', (delta, snapshot) => this.eventContents.push([delta, snapshot])) + .on('message', (message) => this.eventMessages.push(message)) + .on('chatCompletion', (completion) => this.eventChatCompletions.push(completion)) + .on('functionCall', (functionCall) => this.eventFunctionCalls.push(functionCall)) + .on('functionCallResult', (result) => this.eventFunctionCallResults.push(result)) + .on('finalContent', (content) => (this.finalContent = content)) + .on('finalMessage', (message) => (this.finalMessage = message)) + .on('finalChatCompletion', (completion) => (this.finalChatCompletion = completion)) + .on('finalFunctionCall', (functionCall) => (this.finalFunctionCall = functionCall)) + .on('finalFunctionCallResult', (result) => (this.finalFunctionCallResult = result)) + .on('error', (error) => (this.error = error)) + .on('end', () => (this.gotEnd = true)); + } + + async sanityCheck({ error }: { error?: string } = {}) { + if (error) { + expect(this.error?.message).toEqual(error); + expect(this.runner.errored).toBe(true); + await expect(this.runner.finalChatCompletion()).rejects.toThrow(error); + await expect(this.runner.finalMessage()).rejects.toThrow(error); + await expect(this.runner.finalContent()).rejects.toThrow(error); + await expect(this.runner.finalFunctionCall()).rejects.toThrow(error); + await expect(this.runner.finalFunctionCallResult()).rejects.toThrow(error); + await expect(this.runner.done()).rejects.toThrow(error); + } else { + expect(this.error).toBeUndefined(); + expect(this.runner.errored).toBe(false); + } + + if (!this.gotConnect) { + expect(this.eventContents).toEqual([]); + expect(this.eventMessages).toEqual([]); + expect(this.eventChatCompletions).toEqual([]); + expect(this.eventFunctionCalls).toEqual([]); + expect(this.eventFunctionCallResults).toEqual([]); + expect(this.finalContent).toBeUndefined(); + expect(this.finalMessage).toBeUndefined(); + expect(this.finalChatCompletion).toBeUndefined(); + expect(this.finalFunctionCall).toBeUndefined(); + expect(this.finalFunctionCallResult).toBeUndefined(); + expect(this.gotEnd).toBe(true); + return; + } + + if (error) return; + + if (this.eventContents.length) expect(this.eventChunks.length).toBeGreaterThan(0); + expect(this.finalMessage).toEqual(this.eventMessages[this.eventMessages.length - 1]); + expect(await this.runner.finalMessage()).toEqual(this.finalMessage); + expect(this.finalContent).toEqual(this.eventContents[this.eventContents.length - 1]?.[1] ?? 
null); + expect(await this.runner.finalContent()).toEqual(this.finalContent); + expect(this.finalChatCompletion).toEqual(this.eventChatCompletions[this.eventChatCompletions.length - 1]); + expect(await this.runner.finalChatCompletion()).toEqual(this.finalChatCompletion); + expect(this.finalFunctionCall).toEqual(this.eventFunctionCalls[this.eventFunctionCalls.length - 1]); + expect(await this.runner.finalFunctionCall()).toEqual(this.finalFunctionCall); + expect(this.finalFunctionCallResult).toEqual( + this.eventFunctionCallResults[this.eventFunctionCallResults.length - 1], + ); + expect(await this.runner.finalFunctionCallResult()).toEqual(this.finalFunctionCallResult); + expect(this.eventChatCompletions).toEqual(this.runner.allChatCompletions()); + expect(this.eventMessages).toEqual(this.runner.messages.slice(-this.eventMessages.length)); + if (error) { + expect(this.error?.message).toEqual(error); + expect(this.runner.errored).toBe(true); + } else { + expect(this.error).toBeUndefined(); + expect(this.runner.errored).toBe(false); + } + expect(this.gotEnd).toBe(true); + } +} + +function _typeTests() { + const openai = new OpenAI(); + + openai.beta.chat.completions.runFunctions({ + messages: [ + { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, + ], + model: 'gpt-3.5-turbo', + functions: [ + { + name: 'numProperties', + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + parse: (str: string): object => { + const result = JSON.parse(str); + if (!(result instanceof Object) || Array.isArray(result)) { + throw new Error('must be an object'); + } + return result; + }, + description: 'gets the number of properties on an object', + }, + { + function: (str: string) => String(str.length), + parameters: { type: 'string' }, + description: 'gets the length of a string', + }, + // @ts-expect-error function must accept string if parse is omitted + { + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + description: 'gets the number of properties on an object', + }, + ], + }); + openai.beta.chat.completions.runFunctions({ + messages: [ + { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, + ], + model: 'gpt-3.5-turbo', + functions: [ + new ParsingFunction({ + name: 'numProperties', + // @ts-expect-error parse and function don't match + parse: (str: string) => str, + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + description: 'gets the number of properties on an object', + }), + ], + }); + openai.beta.chat.completions.runFunctions({ + messages: [ + { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, + ], + model: 'gpt-3.5-turbo', + functions: [ + new ParsingFunction({ + name: 'numProperties', + parse: (str: string): object => { + const result = JSON.parse(str); + if (!(result instanceof Object) || Array.isArray(result)) { + throw new Error('must be an object'); + } + return result; + }, + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + description: 'gets the number of properties on an object', + }), + new ParsingFunction({ + name: 'keys', + parse: (str: string): object => { + const result = JSON.parse(str); + if (!(result instanceof Object)) { + throw new Error('must be an Object'); + } + return result; + }, + function: (obj: object) => Object.keys(obj).join(', '), + parameters: { type: 
'object' }, + description: 'gets the number of properties on an object', + }), + new ParsingFunction({ + name: 'len2', + // @ts-expect-error parse and function don't match + parse: (str: string) => str, + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + description: 'gets the number of properties on an object', + }), + ], + }); + openai.beta.chat.completions.runFunctions({ + messages: [ + { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' }, + ], + model: 'gpt-3.5-turbo', + // @ts-ignore error occurs here in TS 4 + functions: [ + { + name: 'numProperties', + parse: (str: string): object => { + const result = JSON.parse(str); + if (!(result instanceof Object) || Array.isArray(result)) { + throw new Error('must be an object'); + } + return result; + }, + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + description: 'gets the number of properties on an object', + }, + { + name: 'keys', + parse: (str: string): object => { + const result = JSON.parse(str); + if (!(result instanceof Object)) { + throw new Error('must be an Object'); + } + return result; + }, + function: (obj: object) => Object.keys(obj).join(', '), + parameters: { type: 'object' }, + description: 'gets the number of properties on an object', + }, + { + name: 'len2', + parse: (str: string) => str, + // @ts-ignore error occurs here in TS 5 + // function input doesn't match parse output + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + description: 'gets the number of properties on an object', + }, + ] as const, + }); +} + +describe('resource completions', () => { + describe('runFunctions with stream: false', () => { + test('successful flow', async () => { + const { fetch, handleRequest } = mockChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + messages: [{ role: 'user', content: 'tell me what the weather is like' }], + model: 'gpt-3.5-turbo', + functions: [ + { + function: function getWeather() { + return `it's raining`; + }, + parameters: {}, + description: 'gets the weather', + }, + ], + }); + const listener = new RunnerListener(runner); + + await Promise.all([ + handleRequest(async (request) => { + expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]); + return { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + { + role: 'function', + content: `it's raining`, + name: 'getWeather', + }, + ]); + return { + id: '2', + choices: [ + { + index: 0, + finish_reason: 'stop', + message: { + role: 'assistant', + content: `it's raining`, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + runner.done(), + ]); + + expect(listener.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is 
like' }, + { role: 'assistant', content: null, function_call: { name: 'getWeather', arguments: '' } }, + { role: 'function', content: `it's raining`, name: 'getWeather' }, + { role: 'assistant', content: "it's raining" }, + ]); + expect(listener.functionCallResults).toEqual([`it's raining`]); + await listener.sanityCheck(); + }); + test('flow with abort', async () => { + const { fetch, handleRequest } = mockChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const controller = new AbortController(); + const runner = openai.beta.chat.completions.runFunctions( + { + messages: [{ role: 'user', content: 'tell me what the weather is like' }], + model: 'gpt-3.5-turbo', + functions: [ + { + function: function getWeather() { + return `it's raining`; + }, + parameters: {}, + description: 'gets the weather', + }, + ], + }, + { signal: controller.signal }, + ); + const listener = new RunnerListener(runner); + + await handleRequest(async (request) => { + expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]); + return { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }); + + controller.abort(); + + await runner.done().catch(() => {}); + + expect(listener.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { role: 'assistant', content: null, function_call: { name: 'getWeather', arguments: '' } }, + { role: 'function', content: `it's raining`, name: 'getWeather' }, + ]); + expect(listener.functionCallResults).toEqual([`it's raining`]); + await listener.sanityCheck({ error: 'Request was aborted.' 
}); + expect(runner.aborted).toBe(true); + }); + test('successful flow with parse', async () => { + const { fetch, handleRequest } = mockChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + messages: [ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + ], + model: 'gpt-3.5-turbo', + functions: [ + new ParsingFunction({ + name: 'numProperties', + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + parse: (str: string): object => { + const result = JSON.parse(str); + if (!(result instanceof Object) || Array.isArray(result)) { + throw new Error('must be an object'); + } + return result; + }, + description: 'gets the number of properties on an object', + }), + ], + }); + const listener = new RunnerListener(runner); + + await Promise.all([ + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + ]); + return { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '{"a": 1, "b": 2, "c": 3}', + name: 'numProperties', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + usage: { + completion_tokens: 5, + prompt_tokens: 20, + total_tokens: 25, + }, + }; + }), + + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '{"a": 1, "b": 2, "c": 3}', + name: 'numProperties', + }, + }, + { + role: 'function', + content: '3', + name: 'numProperties', + }, + ]); + return { + id: '2', + choices: [ + { + index: 0, + finish_reason: 'stop', + message: { + role: 'assistant', + content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + usage: { + completion_tokens: 10, + prompt_tokens: 25, + total_tokens: 35, + }, + }; + }), + + runner.done(), + ]); + + expect(listener.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + { + role: 'assistant', + content: null, + function_call: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' }, + }, + { role: 'function', content: '3', name: 'numProperties' }, + { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' }, + ]); + expect(listener.functionCallResults).toEqual(['3']); + await listener.sanityCheck(); + }); + test('flow with parse error', async () => { + const { fetch, handleRequest } = mockChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + messages: [ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + ], + model: 'gpt-3.5-turbo', + functions: [ + new ParsingFunction({ + name: 'numProperties', + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + parse: (str: string): object => { + const 
result = JSON.parse(str); + if (!(result instanceof Object) || Array.isArray(result)) { + throw new Error('must be an object'); + } + return result; + }, + description: 'gets the number of properties on an object', + }), + ], + }); + const listener = new RunnerListener(runner); + + await Promise.all([ + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + ]); + return { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '[{"a": 1, "b": 2, "c": 3}]', + name: 'numProperties', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '[{"a": 1, "b": 2, "c": 3}]', + name: 'numProperties', + }, + }, + { + role: 'function', + content: `must be an object`, + name: 'numProperties', + }, + ]); + return { + id: '2', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '{"a": 1, "b": 2, "c": 3}', + name: 'numProperties', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '[{"a": 1, "b": 2, "c": 3}]', + name: 'numProperties', + }, + }, + { + role: 'function', + content: `must be an object`, + name: 'numProperties', + }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '{"a": 1, "b": 2, "c": 3}', + name: 'numProperties', + }, + }, + { + role: 'function', + content: '3', + name: 'numProperties', + }, + ]); + return { + id: '3', + choices: [ + { + index: 0, + finish_reason: 'stop', + message: { + role: 'assistant', + content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + runner.done(), + ]); + + expect(listener.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + { + role: 'assistant', + content: null, + function_call: { name: 'numProperties', arguments: '[{"a": 1, "b": 2, "c": 3}]' }, + }, + { role: 'function', content: `must be an object`, name: 'numProperties' }, + { + role: 'assistant', + content: null, + function_call: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' }, + }, + { role: 'function', content: '3', name: 'numProperties' }, + { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' }, + ]); + expect(listener.functionCallResults).toEqual([`must be an object`, '3']); + await listener.sanityCheck(); + }); + test('single function call', async () => { + const { fetch, handleRequest } = mockChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = 
openai.beta.chat.completions.runFunctions({ + messages: [{ role: 'user', content: 'tell me what the weather is like' }], + model: 'gpt-3.5-turbo', + function_call: { + name: 'getWeather', + }, + functions: [ + { + function: function getWeather() { + return `it's raining`; + }, + parameters: {}, + description: 'gets the weather', + }, + ], + }); + const listener = new RunnerListener(runner); + + await Promise.all([ + handleRequest(async (request) => { + expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]); + return { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + runner.done(), + ]); + + expect(listener.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { role: 'assistant', content: null, function_call: { name: 'getWeather', arguments: '' } }, + { role: 'function', content: `it's raining`, name: 'getWeather' }, + ]); + expect(listener.functionCallResults).toEqual([`it's raining`]); + await listener.sanityCheck(); + }); + test('wrong function name', async () => { + const { fetch, handleRequest } = mockChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + messages: [{ role: 'user', content: 'tell me what the weather is like' }], + model: 'gpt-3.5-turbo', + functions: [ + { + function: function getWeather() { + return `it's raining`; + }, + parameters: {}, + description: 'gets the weather', + }, + ], + }); + const listener = new RunnerListener(runner); + + await Promise.all([ + handleRequest(async (request) => { + expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]); + return { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'get_weather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'get_weather', + }, + }, + { + role: 'function', + content: `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`, + name: 'get_weather', + }, + ]); + return { + id: '2', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + handleRequest(async (request) => { + expect(request.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'get_weather', + }, + }, + { + role: 'function', + content: `Invalid function_call: "get_weather". Available options are: "getWeather". 
Please try again`, + name: 'get_weather', + }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + { + role: 'function', + content: `it's raining`, + name: 'getWeather', + }, + ]); + return { + id: '3', + choices: [ + { + index: 0, + finish_reason: 'stop', + message: { + role: 'assistant', + content: `it's raining`, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + runner.done(), + ]); + + expect(listener.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { role: 'assistant', content: null, function_call: { name: 'get_weather', arguments: '' } }, + { + role: 'function', + content: `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`, + name: 'get_weather', + }, + { role: 'assistant', content: null, function_call: { name: 'getWeather', arguments: '' } }, + { role: 'function', content: `it's raining`, name: 'getWeather' }, + { role: 'assistant', content: "it's raining" }, + ]); + expect(listener.functionCallResults).toEqual([ + `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`, + `it's raining`, + ]); + await listener.sanityCheck(); + }); + test('wrong function name with single function call', async () => { + const { fetch, handleRequest } = mockChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + messages: [{ role: 'user', content: 'tell me what the weather is like' }], + model: 'gpt-3.5-turbo', + function_call: { + name: 'getWeather', + }, + functions: [ + { + function: function getWeather() { + return `it's raining`; + }, + parameters: {}, + description: 'gets the weather', + }, + ], + }); + const listener = new RunnerListener(runner); + + await Promise.all([ + handleRequest(async (request) => { + expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]); + return { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + message: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'get_weather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + runner.done(), + ]); + + expect(listener.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { role: 'assistant', content: null, function_call: { name: 'get_weather', arguments: '' } }, + { + role: 'function', + content: `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`, + name: 'get_weather', + }, + ]); + expect(listener.functionCallResults).toEqual([ + `Invalid function_call: "get_weather". Available options are: "getWeather". 
Please try again`, + ]); + await listener.sanityCheck(); + }); + }); + describe('runFunctions with stream: true', () => { + test('successful flow', async () => { + const { fetch, handleRequest } = mockStreamingChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + stream: true, + messages: [{ role: 'user', content: 'tell me what the weather is like' }], + model: 'gpt-3.5-turbo', + functions: [ + { + function: function getWeather() { + return `it's raining`; + }, + parameters: {}, + description: 'gets the weather', + }, + ], + }); + const listener = new StreamingRunnerListener(runner); + + await Promise.all([ + handleRequest(async function* (request): AsyncIterable { + expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]); + yield { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + delta: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion.chunk', + }; + }), + handleRequest(async function* (request): AsyncIterable { + expect(request.messages).toEqual([ + { role: 'user', content: 'tell me what the weather is like' }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + { + role: 'function', + content: `it's raining`, + name: 'getWeather', + }, + ]); + for (const choice of contentChoiceDeltas(`it's raining`)) { + yield { + id: '2', + choices: [choice], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + } + }), + runner.done(), + ]); + + expect(listener.eventMessages).toEqual([ + { role: 'assistant', content: null, function_call: { name: 'getWeather', arguments: '' } }, + { role: 'function', content: `it's raining`, name: 'getWeather' }, + { role: 'assistant', content: "it's raining" }, + ]); + expect(listener.eventFunctionCallResults).toEqual([`it's raining`]); + await listener.sanityCheck(); + }); + test('flow with abort', async () => { + const { fetch, handleRequest } = mockStreamingChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const controller = new AbortController(); + const runner = openai.beta.chat.completions.runFunctions( + { + stream: true, + messages: [{ role: 'user', content: 'tell me what the weather is like' }], + model: 'gpt-3.5-turbo', + functions: [ + { + function: function getWeather() { + return `it's raining`; + }, + parameters: {}, + description: 'gets the weather', + }, + ], + }, + { signal: controller.signal }, + ); + runner.on('functionCallResult', () => controller.abort()); + const listener = new StreamingRunnerListener(runner); + + await handleRequest(async function* (request): AsyncIterable { + expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]); + yield { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + delta: { + role: 'assistant', + content: null, + function_call: { + arguments: '', + name: 'getWeather', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }); + + await runner.done().catch(() => {}); + + expect(listener.eventMessages).toEqual([ + { role: 'assistant', 
content: null, function_call: { name: 'getWeather', arguments: '' } }, + { role: 'function', content: `it's raining`, name: 'getWeather' }, + ]); + expect(listener.eventFunctionCallResults).toEqual([`it's raining`]); + await listener.sanityCheck({ error: 'Request was aborted.' }); + expect(runner.aborted).toBe(true); + }); + test('successful flow with parse', async () => { + const { fetch, handleRequest } = mockStreamingChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + stream: true, + messages: [ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + ], + model: 'gpt-3.5-turbo', + functions: [ + new ParsingFunction({ + name: 'numProperties', + function: (obj: object) => String(Object.keys(obj).length), + parameters: { type: 'object' }, + parse: (str: string): object => { + const result = JSON.parse(str); + if (!(result instanceof Object) || Array.isArray(result)) { + throw new Error('must be an object'); + } + return result; + }, + description: 'gets the number of properties on an object', + }), + ], + }); + const listener = new StreamingRunnerListener(runner); + + await Promise.all([ + handleRequest(async function* (request): AsyncIterable { + expect(request.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + ]); + yield { + id: '1', + choices: [ + { + index: 0, + finish_reason: 'function_call', + delta: { + role: 'assistant', + content: null, + function_call: { + arguments: '{"a": 1, "b": 2, "c": 3}', + name: 'numProperties', + }, + }, + }, + ], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + }), + handleRequest(async function* (request): AsyncIterable { + expect(request.messages).toEqual([ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + { + role: 'assistant', + content: null, + function_call: { + arguments: '{"a": 1, "b": 2, "c": 3}', + name: 'numProperties', + }, + }, + { + role: 'function', + content: '3', + name: 'numProperties', + }, + ]); + for (const choice of contentChoiceDeltas(`there are 3 properties in {"a": 1, "b": 2, "c": 3}`)) { + yield { + id: '2', + choices: [choice], + created: Math.floor(Date.now() / 1000), + model: 'gpt-3.5-turbo', + object: 'chat.completion', + }; + } + }), + runner.done(), + ]); + + expect(listener.eventMessages).toEqual([ + { + role: 'assistant', + content: null, + function_call: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' }, + }, + { role: 'function', content: '3', name: 'numProperties' }, + { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' }, + ]); + expect(listener.eventFunctionCallResults).toEqual(['3']); + await listener.sanityCheck(); + }); + test('flow with parse error', async () => { + const { fetch, handleRequest } = mockStreamingChatCompletionFetch(); + + const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch }); + + const runner = openai.beta.chat.completions.runFunctions({ + stream: true, + messages: [ + { + role: 'user', + content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}', + }, + ], + model: 'gpt-3.5-turbo', + functions: [ + new ParsingFunction({ + name: 'numProperties', + function: (obj: object) => String(Object.keys(obj).length), + 
parameters: { type: 'object' },
+            parse: (str: string): object => {
+              const result = JSON.parse(str);
+              if (!(result instanceof Object) || Array.isArray(result)) {
+                throw new Error('must be an object');
+              }
+              return result;
+            },
+            description: 'gets the number of properties on an object',
+          }),
+        ],
+      });
+      const listener = new StreamingRunnerListener(runner);
+
+      await Promise.all([
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([
+            {
+              role: 'user',
+              content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
+            },
+          ]);
+          for (const choice of functionCallDeltas('[{"a": 1, "b": 2, "c": 3}]', { name: 'numProperties' })) {
+            yield {
+              id: '1',
+              choices: [choice],
+              created: Math.floor(Date.now() / 1000),
+              model: 'gpt-3.5-turbo',
+              object: 'chat.completion',
+            };
+          }
+        }),
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([
+            {
+              role: 'user',
+              content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
+            },
+            {
+              role: 'assistant',
+              content: null,
+              function_call: {
+                arguments: '[{"a": 1, "b": 2, "c": 3}]',
+                name: 'numProperties',
+              },
+            },
+            {
+              role: 'function',
+              content: `must be an object`,
+              name: 'numProperties',
+            },
+          ]);
+          for (const choice of functionCallDeltas('{"a": 1, "b": 2, "c": 3}', { name: 'numProperties' })) {
+            yield {
+              id: '2',
+              choices: [choice],
+              created: Math.floor(Date.now() / 1000),
+              model: 'gpt-3.5-turbo',
+              object: 'chat.completion',
+            };
+          }
+        }),
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([
+            {
+              role: 'user',
+              content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
+            },
+            {
+              role: 'assistant',
+              content: null,
+              function_call: {
+                arguments: '[{"a": 1, "b": 2, "c": 3}]',
+                name: 'numProperties',
+              },
+            },
+            {
+              role: 'function',
+              content: `must be an object`,
+              name: 'numProperties',
+            },
+            {
+              role: 'assistant',
+              content: null,
+              function_call: {
+                arguments: '{"a": 1, "b": 2, "c": 3}',
+                name: 'numProperties',
+              },
+            },
+            {
+              role: 'function',
+              content: '3',
+              name: 'numProperties',
+            },
+          ]);
+          for (const choice of contentChoiceDeltas(`there are 3 properties in {"a": 1, "b": 2, "c": 3}`)) {
+            yield {
+              id: '3',
+              choices: [choice],
+              created: Math.floor(Date.now() / 1000),
+              model: 'gpt-3.5-turbo',
+              object: 'chat.completion',
+            };
+          }
+        }),
+        runner.done(),
+      ]);
+
+      expect(listener.eventMessages).toEqual([
+        {
+          role: 'assistant',
+          content: null,
+          function_call: { name: 'numProperties', arguments: '[{"a": 1, "b": 2, "c": 3}]' },
+        },
+        { role: 'function', content: `must be an object`, name: 'numProperties' },
+        {
+          role: 'assistant',
+          content: null,
+          function_call: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' },
+        },
+        { role: 'function', content: '3', name: 'numProperties' },
+        { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' },
+      ]);
+      expect(listener.eventFunctionCallResults).toEqual([`must be an object`, '3']);
+      await listener.sanityCheck();
+    });
+    test('single function call', async () => {
+      const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
+
+      const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
+
+      const runner = openai.beta.chat.completions.runFunctions({
+        stream: true,
+        messages: [{ role: 'user', content: 'tell me what the weather is like' }],
+        model: 'gpt-3.5-turbo',
+        function_call: {
+          name: 'getWeather',
+        },
+        functions: [
+          {
+            function: function getWeather() {
+              return `it's raining`;
+            },
+            parameters: {},
+            description: 'gets the weather',
+          },
+        ],
+      });
+      const listener = new StreamingRunnerListener(runner);
+
+      await Promise.all([
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
+          yield {
+            id: '1',
+            choices: [
+              {
+                index: 0,
+                finish_reason: 'function_call',
+                delta: {
+                  role: 'assistant',
+                  content: null,
+                  function_call: {
+                    arguments: '',
+                    name: 'getWeather',
+                  },
+                },
+              },
+            ],
+            created: Math.floor(Date.now() / 1000),
+            model: 'gpt-3.5-turbo',
+            object: 'chat.completion',
+          };
+        }),
+        runner.done(),
+      ]);
+
+      expect(listener.eventMessages).toEqual([
+        { role: 'assistant', content: null, function_call: { name: 'getWeather', arguments: '' } },
+        { role: 'function', content: `it's raining`, name: 'getWeather' },
+      ]);
+      expect(listener.eventFunctionCallResults).toEqual([`it's raining`]);
+      await listener.sanityCheck();
+    });
+    test('wrong function name', async () => {
+      const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
+
+      const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
+
+      const runner = openai.beta.chat.completions.runFunctions({
+        stream: true,
+        messages: [{ role: 'user', content: 'tell me what the weather is like' }],
+        model: 'gpt-3.5-turbo',
+        functions: [
+          {
+            function: function getWeather() {
+              return `it's raining`;
+            },
+            parameters: {},
+            description: 'gets the weather',
+          },
+        ],
+      });
+      const listener = new StreamingRunnerListener(runner);
+
+      await Promise.all([
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
+          yield {
+            id: '1',
+            choices: [
+              {
+                index: 0,
+                finish_reason: 'function_call',
+                delta: {
+                  role: 'assistant',
+                  content: null,
+                  function_call: {
+                    arguments: '',
+                    name: 'get_weather',
+                  },
+                },
+              },
+            ],
+            created: Math.floor(Date.now() / 1000),
+            model: 'gpt-3.5-turbo',
+            object: 'chat.completion',
+          };
+        }),
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([
+            { role: 'user', content: 'tell me what the weather is like' },
+            {
+              role: 'assistant',
+              content: null,
+              function_call: {
+                arguments: '',
+                name: 'get_weather',
+              },
+            },
+            {
+              role: 'function',
+              content: `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`,
+              name: 'get_weather',
+            },
+          ]);
+          yield {
+            id: '2',
+            choices: [
+              {
+                index: 0,
+                finish_reason: 'function_call',
+                delta: {
+                  role: 'assistant',
+                  content: null,
+                  function_call: {
+                    arguments: '',
+                    name: 'getWeather',
+                  },
+                },
+              },
+            ],
+            created: Math.floor(Date.now() / 1000),
+            model: 'gpt-3.5-turbo',
+            object: 'chat.completion',
+          };
+        }),
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([
+            { role: 'user', content: 'tell me what the weather is like' },
+            {
+              role: 'assistant',
+              content: null,
+              function_call: {
+                arguments: '',
+                name: 'get_weather',
+              },
+            },
+            {
+              role: 'function',
+              content: `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`,
+              name: 'get_weather',
+            },
+            {
+              role: 'assistant',
+              content: null,
+              function_call: {
+                arguments: '',
+                name: 'getWeather',
+              },
+            },
+            {
+              role: 'function',
+              content: `it's raining`,
+              name: 'getWeather',
+            },
+          ]);
+          for (const choice of contentChoiceDeltas(`it's raining`)) {
+            yield {
+              id: '3',
+              choices: [choice],
+              created: Math.floor(Date.now() / 1000),
+              model: 'gpt-3.5-turbo',
+              object: 'chat.completion',
+            };
+          }
+        }),
+        runner.done(),
+      ]);
+
+      expect(listener.eventMessages).toEqual([
+        { role: 'assistant', content: null, function_call: { name: 'get_weather', arguments: '' } },
+        {
+          role: 'function',
+          content: `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`,
+          name: 'get_weather',
+        },
+        { role: 'assistant', content: null, function_call: { name: 'getWeather', arguments: '' } },
+        { role: 'function', content: `it's raining`, name: 'getWeather' },
+        { role: 'assistant', content: "it's raining" },
+      ]);
+      expect(listener.eventFunctionCallResults).toEqual([
+        `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`,
+        `it's raining`,
+      ]);
+      await listener.sanityCheck();
+    });
+    test('wrong function name with single function call', async () => {
+      const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
+
+      const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
+
+      const runner = openai.beta.chat.completions.runFunctions({
+        stream: true,
+        messages: [{ role: 'user', content: 'tell me what the weather is like' }],
+        model: 'gpt-3.5-turbo',
+        function_call: {
+          name: 'getWeather',
+        },
+        functions: [
+          {
+            function: function getWeather() {
+              return `it's raining`;
+            },
+            parameters: {},
+            description: 'gets the weather',
+          },
+        ],
+      });
+      const listener = new StreamingRunnerListener(runner);
+
+      await Promise.all([
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
+          yield {
+            id: '1',
+            choices: [
+              {
+                index: 0,
+                finish_reason: 'function_call',
+                delta: {
+                  role: 'assistant',
+                  content: null,
+                  function_call: {
+                    arguments: '',
+                    name: 'get_weather',
+                  },
+                },
+              },
+            ],
+            created: Math.floor(Date.now() / 1000),
+            model: 'gpt-3.5-turbo',
+            object: 'chat.completion',
+          };
+        }),
+        runner.done(),
+      ]);
+
+      expect(listener.eventMessages).toEqual([
+        { role: 'assistant', content: null, function_call: { name: 'get_weather', arguments: '' } },
+        {
+          role: 'function',
+          content: `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`,
+          name: 'get_weather',
+        },
+      ]);
+      expect(listener.eventFunctionCallResults).toEqual([
+        `Invalid function_call: "get_weather". Available options are: "getWeather". Please try again`,
+      ]);
+      await listener.sanityCheck();
+    });
+  });
+  describe('stream', () => {
+    test('successful flow', async () => {
+      const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
+
+      const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
+
+      const runner = openai.beta.chat.completions.stream({
+        stream: true,
+        messages: [{ role: 'user', content: 'tell me what the weather is like' }],
+        model: 'gpt-3.5-turbo',
+      });
+
+      const listener = new StreamingRunnerListener(runner);
+
+      await Promise.all([
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
+          for (const choice of contentChoiceDeltas(`The weather is great today!`)) {
+            yield {
+              id: '1',
+              choices: [choice],
+              created: Math.floor(Date.now() / 1000),
+              model: 'gpt-3.5-turbo',
+              object: 'chat.completion',
+            };
+          }
+        }),
+        runner.done(),
+      ]);
+
+      expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' });
+      await listener.sanityCheck();
+    });
+    test('toReadableStream and fromReadableStream', async () => {
+      const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
+
+      const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
+
+      const runner = openai.beta.chat.completions.stream({
+        stream: true,
+        messages: [{ role: 'user', content: 'tell me what the weather is like' }],
+        model: 'gpt-3.5-turbo',
+      });
+
+      const proxied = ChatCompletionStreamingRunner.fromReadableStream(runner.toReadableStream());
+      const listener = new StreamingRunnerListener(proxied);
+
+      await Promise.all([
+        handleRequest(async function* (request): AsyncIterable<ChatCompletionChunk> {
+          expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
+          for (const choice of contentChoiceDeltas(`The weather is great today!`)) {
+            yield {
+              id: '1',
+              choices: [choice],
+              created: Math.floor(Date.now() / 1000),
+              model: 'gpt-3.5-turbo',
+              object: 'chat.completion',
+            };
+          }
+        }),
+        proxied.done(),
+      ]);
+
+      expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' });
+      await listener.sanityCheck();
+    });
+  });
+});
diff --git a/src/lib/ChatCompletionRunner.ts b/src/lib/ChatCompletionRunner.ts
new file mode 100644
index 000000000..cb9bd4867
--- /dev/null
+++ b/src/lib/ChatCompletionRunner.ts
@@ -0,0 +1,53 @@
+import * as Core from 'openai/core';
+import {
+  type Completions,
+  type ChatCompletionMessage,
+  type ChatCompletionMessageParam,
+  type ChatCompletionCreateParams,
+  type ChatCompletionCreateParamsNonStreaming,
+} from 'openai/resources/chat/completions';
+import { type RunnableFunctions, type BaseFunctionsArgs } from './RunnableFunction';
+import {
+  AbstractChatCompletionRunner,
+  AbstractChatCompletionRunnerEvents,
+} from './AbstractChatCompletionRunner';
+
+export interface ChatCompletionRunnerEvents extends AbstractChatCompletionRunnerEvents {
+  content: (content: string) => void;
+}
+
+export type ChatCompletionFunctionRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<
+  ChatCompletionCreateParamsNonStreaming,
+  'functions'
+> & {
+  functions: RunnableFunctions<FunctionsArgs>;
+};
+
+export class ChatCompletionRunner extends AbstractChatCompletionRunner<ChatCompletionRunnerEvents> {
+  static runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
+    completions: Completions,
+    params: ChatCompletionFunctionRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions & { maxChatCompletions?: number },
+  ): ChatCompletionRunner {
+    const runner = new ChatCompletionRunner();
+    runner._run(() => runner._runFunctions(completions, params, options));
+    return runner;
+  }
+
+  static createChatCompletion(
+    completions: Completions,
+    params: ChatCompletionCreateParams,
+    options?: Core.RequestOptions,
+  ): ChatCompletionRunner {
+    const runner = new ChatCompletionRunner();
+    runner._run(() => runner._runChatCompletion(completions, params, options));
+    return runner;
+  }
+
+  override _addMessage(message: ChatCompletionMessage | ChatCompletionMessageParam) {
+    super._addMessage(message);
+    if (message.role === 'assistant' && message.content) {
+      this._emit('content', message.content);
+    }
+  }
+}
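The `_addMessage` override above is what feeds the runner's `content` event: whenever an assistant message with non-empty text is appended to the conversation, listeners receive that text. A minimal usage sketch (the prompt is illustrative, and `functions` is left empty only to keep the example short):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const runner = client.beta.chat.completions.runFunctions({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello' }],
    functions: [], // assumption: no functions are needed for this demo
  });

  // Fired by the `_addMessage` override for each assistant message with text content.
  runner.on('content', (content) => console.log('assistant:', content));

  await runner.done();
}

main();
```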
diff --git a/src/lib/ChatCompletionStream.ts b/src/lib/ChatCompletionStream.ts
new file mode 100644
index 000000000..9b3e2a419
--- /dev/null
+++ b/src/lib/ChatCompletionStream.ts
@@ -0,0 +1,311 @@
+import * as Core from 'openai/core';
+import { OpenAIError, APIUserAbortError } from 'openai/error';
+import {
+  Completions,
+  type ChatCompletion,
+  type ChatCompletionChunk,
+  type ChatCompletionCreateParams,
+  type ChatCompletionCreateParamsStreaming,
+} from 'openai/resources/chat/completions';
+import {
+  AbstractChatCompletionRunner,
+  type AbstractChatCompletionRunnerEvents,
+} from './AbstractChatCompletionRunner';
+import { type ReadableStream } from 'openai/_shims/index';
+import { Stream } from 'openai/streaming';
+
+export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
+  content: (contentDelta: string, contentSnapshot: string) => void;
+  chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;
+}
+
+export type ChatCompletionStreamParams = ChatCompletionCreateParamsStreaming;
+
+export class ChatCompletionStream
+  extends AbstractChatCompletionRunner<ChatCompletionStreamEvents>
+  implements AsyncIterable<ChatCompletionChunk>
+{
+  #currentChatCompletionSnapshot: ChatCompletionSnapshot | undefined;
+
+  get currentChatCompletionSnapshot(): ChatCompletionSnapshot | undefined {
+    return this.#currentChatCompletionSnapshot;
+  }
+
+  static fromReadableStream(stream: ReadableStream): ChatCompletionStream {
+    const runner = new ChatCompletionStream();
+    runner._run(() => runner._fromReadableStream(stream));
+    return runner;
+  }
+
+  static createChatCompletion(
+    completions: Completions,
+    params: ChatCompletionCreateParams,
+    options?: Core.RequestOptions,
+  ): ChatCompletionStream {
+    const runner = new ChatCompletionStream();
+    runner._run(() => runner._runChatCompletion(completions, params, options));
+    return runner;
+  }
+
+  #beginRequest() {
+    if (this.ended) return;
+    this.#currentChatCompletionSnapshot = undefined;
+  }
+  #addChunk(chunk: ChatCompletionChunk) {
+    if (this.ended) return;
+    const completion = this.#accumulateChatCompletion(chunk);
+    this._emit('chunk', chunk, completion);
+    const delta = chunk.choices[0]?.delta.content;
+    const snapshot = completion.choices[0]?.message;
+    if (delta != null && snapshot?.role === 'assistant' && snapshot?.content) {
+      this._emit('content', delta, snapshot.content);
+    }
+  }
+  #endRequest(): ChatCompletion {
+    if (this.ended) {
+      throw new OpenAIError(`stream has ended, this shouldn't happen`);
+    }
+    const snapshot = this.#currentChatCompletionSnapshot;
+    if (!snapshot) {
+      throw new OpenAIError(`request ended without sending any chunks`);
+    }
+    this.#currentChatCompletionSnapshot = undefined;
+    return finalizeChatCompletion(snapshot);
+  }
+
+  protected override async _createChatCompletion(
+    completions: Completions,
+    params: ChatCompletionCreateParams,
+    options?: Core.RequestOptions,
+  ): Promise<ChatCompletion> {
+    const signal = options?.signal;
+    if (signal) {
+      if (signal.aborted) this.controller.abort();
+      signal.addEventListener('abort', () => this.controller.abort());
+    }
+    this.#beginRequest();
+    const stream = await completions.create(
+      { ...params, stream: true },
+      { ...options, signal: this.controller.signal },
+    );
+    this._connected();
+    for await (const chunk of stream) {
+      this.#addChunk(chunk);
+    }
+    if (stream.controller.signal?.aborted) {
+      throw new APIUserAbortError();
+    }
+    return this._addChatCompletion(this.#endRequest());
+  }
+
+  protected async _fromReadableStream(
+    readableStream: ReadableStream,
+    options?: Core.RequestOptions,
+  ): Promise<ChatCompletion> {
+    const signal = options?.signal;
+    if (signal) {
+      if (signal.aborted) this.controller.abort();
+      signal.addEventListener('abort', () => this.controller.abort());
+    }
+    this.#beginRequest();
+    this._connected();
+    const stream = Stream.fromReadableStream(readableStream, this.controller);
+    for await (const chunk of stream) {
+      this.#addChunk(chunk);
+    }
+    if (stream.controller.signal?.aborted) {
+      throw new APIUserAbortError();
+    }
+    return this._addChatCompletion(this.#endRequest());
+  }
+
+  #accumulateChatCompletion(chunk: ChatCompletionChunk): ChatCompletionSnapshot {
+    let snapshot = this.#currentChatCompletionSnapshot;
+    if (!snapshot) {
+      const { choices, ...rest } = chunk;
+      this.#currentChatCompletionSnapshot = snapshot = {
+        ...rest,
+        choices: [],
+      };
+    }
+    for (const { delta, finish_reason, index } of chunk.choices) {
+      let choice = snapshot.choices[index];
+      if (!choice) snapshot.choices[index] = choice = { finish_reason, index, message: delta };
+      else {
+        if (finish_reason) choice.finish_reason = finish_reason;
+        const { content, function_call, role } = delta;
+        if (content) choice.message.content = (choice.message.content || '') + content;
+        if (role) choice.message.role = role;
+        if (function_call) {
+          if (!choice.message.function_call) choice.message.function_call = function_call;
+          else {
+            if (function_call.arguments)
+              choice.message.function_call.arguments =
+                (choice.message.function_call.arguments || '') + function_call.arguments;
+            if (function_call.name) choice.message.function_call.name = function_call.name;
+          }
+        }
+      }
+    }
+    return snapshot;
+  }
+
+  [Symbol.asyncIterator](): AsyncIterator<ChatCompletionChunk> {
+    const pushQueue: ChatCompletionChunk[] = [];
+    const readQueue: ((chunk: ChatCompletionChunk | undefined) => void)[] = [];
+    let done = false;
+
+    this.on('chunk', (chunk) => {
+      const reader = readQueue.shift();
+      if (reader) {
+        reader(chunk);
+      } else {
+        pushQueue.push(chunk);
+      }
+    });
+
+    this.on('end', () => {
+      done = true;
+      for (const reader of readQueue) {
+        reader(undefined);
+      }
+      readQueue.length = 0;
+    });
+
+    return {
+      next: async (): Promise<IteratorResult<ChatCompletionChunk>> => {
+        if (!pushQueue.length) {
+          if (done) {
+            return { value: undefined, done: true };
+          }
+          return new Promise<ChatCompletionChunk | undefined>((resolve) => readQueue.push(resolve)).then(
+            (chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }),
+          );
+        }
+        const chunk = pushQueue.shift()!;
+        return { value: chunk, done: false };
+      },
+    };
+  }
+
+  toReadableStream(): ReadableStream {
+    const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);
+    return stream.toReadableStream();
+  }
+}
+
+function finalizeChatCompletion(snapshot: ChatCompletionSnapshot): ChatCompletion {
+  const { id, choices, created, model } = snapshot;
+  return {
+    id,
+    choices: choices.map(({ message, finish_reason, index }): ChatCompletion.Choice => {
+      if (!finish_reason) throw new OpenAIError(`missing finish_reason for choice ${index}`);
+      const { content = null, function_call, role } = message;
+      if (!role) throw new OpenAIError(`missing role for choice ${index}`);
+      if (function_call) {
+        const { arguments: args, name } = function_call;
+        if (args == null) throw new OpenAIError(`missing function_call.arguments for choice ${index}`);
+        if (!name) throw new OpenAIError(`missing function_call.name for choice ${index}`);
+        return { message: { content, function_call: { arguments: args, name }, role }, finish_reason, index };
+      }
+      return { message: { content: content, role }, finish_reason, index };
+    }),
+    created,
+    model,
+    object: 'chat.completion',
+  };
+}
+
+/**
+ * Represents a streamed chunk of a chat completion response returned by model,
+ * based on the provided input.
+ */
+export interface ChatCompletionSnapshot {
+  /**
+   * A unique identifier for the chat completion.
+   */
+  id: string;
+
+  /**
+   * A list of chat completion choices. Can be more than one if `n` is greater
+   * than 1.
+   */
+  choices: Array<ChatCompletionSnapshot.Choice>;
+
+  /**
+   * The Unix timestamp (in seconds) of when the chat completion was created.
+   */
+  created: number;
+
+  /**
+   * The model used to generate the completion.
+   */
+  model: string;
+}
+
+export namespace ChatCompletionSnapshot {
+  export interface Choice {
+    /**
+     * A chat completion delta generated by streamed model responses.
+     */
+    message: Choice.Message;
+
+    /**
+     * The reason the model stopped generating tokens. This will be `stop` if the model
+     * hit a natural stop point or a provided stop sequence, `length` if the maximum
+     * number of tokens specified in the request was reached, `content_filter` if
+     * content was omitted due to a flag from our content filters, or `function_call`
+     * if the model called a function.
+     */
+    finish_reason: 'stop' | 'length' | 'function_call' | 'content_filter' | null;
+
+    /**
+     * The index of the choice in the list of choices.
+     */
+    index: number;
+  }
+
+  export namespace Choice {
+    /**
+     * A chat completion delta generated by streamed model responses.
+     */
+    export interface Message {
+      /**
+       * The contents of the chunk message.
+       */
+      content?: string | null;
+
+      /**
+       * The name and arguments of a function that should be called, as generated by the
+       * model.
+       */
+      function_call?: Message.FunctionCall;
+
+      /**
+       * The role of the author of this message.
+       */
+      role?: 'system' | 'user' | 'assistant' | 'function';
+    }
+
+    export namespace Message {
+      /**
+       * The name and arguments of a function that should be called, as generated by the
+       * model.
+       */
+      export interface FunctionCall {
+        /**
+         * The arguments to call the function with, as generated by the model in JSON
+         * format. Note that the model does not always generate valid JSON, and may
+         * hallucinate parameters not defined by your function schema. Validate the
+         * arguments in your code before calling your function.
+         */
+        arguments?: string;
+
+        /**
+         * The name of the function to call.
+         */
+        name?: string;
+      }
+    }
+  }
+}
diff --git a/src/lib/ChatCompletionStreamingRunner.ts b/src/lib/ChatCompletionStreamingRunner.ts
new file mode 100644
index 000000000..0057c7623
--- /dev/null
+++ b/src/lib/ChatCompletionStreamingRunner.ts
@@ -0,0 +1,54 @@
+import * as Core from 'openai/core';
+import {
+  Completions,
+  type ChatCompletionChunk,
+  type ChatCompletionCreateParams,
+  type ChatCompletionCreateParamsStreaming,
+} from 'openai/resources/chat/completions';
+import { type AbstractChatCompletionRunnerEvents } from './AbstractChatCompletionRunner';
+import { type ReadableStream } from 'openai/_shims/index';
+import { type BaseFunctionsArgs, type RunnableFunctions } from './RunnableFunction';
+import { ChatCompletionSnapshot, ChatCompletionStream } from './ChatCompletionStream';
+
+export interface ChatCompletionStreamEvents extends AbstractChatCompletionRunnerEvents {
+  content: (contentDelta: string, contentSnapshot: string) => void;
+  chunk: (chunk: ChatCompletionChunk, snapshot: ChatCompletionSnapshot) => void;
+}
+
+export type ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs extends BaseFunctionsArgs> = Omit<
+  ChatCompletionCreateParamsStreaming,
+  'functions'
+> & {
+  functions: RunnableFunctions<FunctionsArgs>;
+};
+
+export class ChatCompletionStreamingRunner
+  extends ChatCompletionStream
+  implements AsyncIterable<ChatCompletionChunk>
+{
+  static override fromReadableStream(stream: ReadableStream): ChatCompletionStreamingRunner {
+    const runner = new ChatCompletionStreamingRunner();
+    runner._run(() => runner._fromReadableStream(stream));
+    return runner;
+  }
+
+  static runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
+    completions: Completions,
+    params: ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions & { maxChatCompletions?: number },
+  ): ChatCompletionStreamingRunner {
+    const runner = new ChatCompletionStreamingRunner();
+    runner._run(() => runner._runFunctions(completions, params, options));
+    return runner;
+  }
+
+  static override createChatCompletion(
+    completions: Completions,
+    params: ChatCompletionCreateParams,
+    options?: Core.RequestOptions,
+  ): ChatCompletionStreamingRunner {
+    const runner = new ChatCompletionStreamingRunner();
+    runner._run(() => runner._runChatCompletion(completions, params, options));
+    return runner;
+  }
+}
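Because `ChatCompletionStreamingRunner` inherits `toReadableStream()` and `fromReadableStream()` from `ChatCompletionStream`, a stream can be serialized across a process or network boundary and rebuilt on the other side, which is exactly what the `toReadableStream and fromReadableStream` test above exercises. A sketch of that round trip (assuming a Node environment with `process.stdout`):

```ts
import OpenAI from 'openai';
import { ChatCompletionStreamingRunner } from 'openai/lib/ChatCompletionStreamingRunner';

const client = new OpenAI();

async function main() {
  const runner = client.beta.chat.completions.stream({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'tell me what the weather is like' }],
    stream: true,
  });

  // Serialize to a newline-delimited ReadableStream of JSON chunks…
  const readable = runner.toReadableStream();

  // …and rebuild an equivalent runner from it (e.g. in the browser).
  const proxied = ChatCompletionStreamingRunner.fromReadableStream(readable);
  proxied.on('content', (delta) => process.stdout.write(delta));

  await proxied.done();
}

main();
```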
diff --git a/src/lib/RunnableFunction.ts b/src/lib/RunnableFunction.ts
new file mode 100644
index 000000000..1de9f04ca
--- /dev/null
+++ b/src/lib/RunnableFunction.ts
@@ -0,0 +1,96 @@
+import { type ChatCompletionRunner } from './ChatCompletionRunner';
+import { type ChatCompletionStreamingRunner } from './ChatCompletionStreamingRunner';
+import { JSONSchema } from './jsonschema';
+
+type PromiseOrValue<T> = T | Promise<T>;
+
+export type RunnableFunctionWithParse<Args extends object> = {
+  /**
+   * @param args the return value from `parse`.
+   * @param runner the runner evaluating this callback.
+   * @returns a string to send back to OpenAI.
+   */
+  function: (
+    args: Args,
+    runner: ChatCompletionRunner | ChatCompletionStreamingRunner,
+  ) => PromiseOrValue<unknown>;
+  /**
+   * @param input the raw args from the OpenAI function call.
+   * @returns the parsed arguments to pass to `function`
+   */
+  parse: (input: string) => PromiseOrValue<Args>;
+  /**
+   * The parameters the function accepts, described as a JSON Schema object.
+   */
+  parameters: JSONSchema;
+  /**
+   * A description of what the function does, used by the model to choose when and how to call the function.
+   */
+  description: string;
+  /**
+   * The name of the function to be called. Will default to function.name if omitted.
+   */
+  name?: string | undefined;
+};
+
+export type RunnableFunctionWithoutParse = {
+  /**
+   * @param args the raw args from the OpenAI function call.
+   * @returns a string to send back to OpenAI
+   */
+  function: (
+    args: string,
+    runner: ChatCompletionRunner | ChatCompletionStreamingRunner,
+  ) => PromiseOrValue<unknown>;
+  /**
+   * The parameters the function accepts, described as a JSON Schema object.
+   */
+  parameters: JSONSchema;
+  /**
+   * A description of what the function does, used by the model to choose when and how to call the function.
+   */
+  description: string;
+  /**
+   * The name of the function to be called. Will default to function.name if omitted.
+   */
+  name?: string | undefined;
+};
+
+export type RunnableFunction<Args extends object | string> =
+  Args extends string ? RunnableFunctionWithoutParse
+  : Args extends object ? RunnableFunctionWithParse<Args>
+  : never;
+
+export function isRunnableFunctionWithParse<Args extends object>(
+  fn: any,
+): fn is RunnableFunctionWithParse<Args> {
+  return typeof (fn as any).parse === 'function';
+}
+
+export type BaseFunctionsArgs = readonly (object | string)[];
+
+export type RunnableFunctions<FunctionsArgs extends BaseFunctionsArgs> =
+  [any[]] extends [FunctionsArgs] ? readonly RunnableFunction<any>[]
+  : {
+      [Index in keyof FunctionsArgs]: Index extends number ? RunnableFunction<FunctionsArgs[Index]>
+      : FunctionsArgs[Index];
+    };
+
+/**
+ * This is a helper class for passing a `function` and `parse` where the `function`
+ * argument type matches the `parse` return type.
+ */
+export class ParsingFunction<Args extends object> {
+  constructor(input: RunnableFunctionWithParse<Args>) {
+    this.function = input.function;
+    this.parse = input.parse;
+    this.parameters = input.parameters;
+    this.description = input.description;
+    this.name = input.name;
+  }
+  function: RunnableFunctionWithParse<Args>['function'];
+  parse: RunnableFunctionWithParse<Args>['parse'];
+  parameters: RunnableFunctionWithParse<Args>['parameters'];
+  description: RunnableFunctionWithParse<Args>['description'];
+  name?: RunnableFunctionWithParse<Args>['name'];
+}
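`ParsingFunction` exists so that the `parse` return type and the `function` argument type stay linked in one place. A hedged sketch (the schema and the weather lookup are illustrative, not part of this diff):

```ts
import { ParsingFunction } from 'openai/lib/RunnableFunction';

const getWeather = new ParsingFunction({
  name: 'getWeather',
  description: 'gets the weather in a location',
  parameters: {
    type: 'object',
    properties: { location: { type: 'string' } },
    required: ['location'],
  },
  // Whatever `parse` returns is exactly what `function` receives as `args`.
  parse: (input: string): { location: string } => {
    const args = JSON.parse(input);
    if (typeof args.location !== 'string') throw new Error('location must be a string');
    return args;
  },
  function: (args) => `It is sunny in ${args.location}`,
});
```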
diff --git a/src/lib/jsonschema.ts b/src/lib/jsonschema.ts
new file mode 100644
index 000000000..636277705
--- /dev/null
+++ b/src/lib/jsonschema.ts
@@ -0,0 +1,148 @@
+// File mostly copied from @types/json-schema, but stripped down a bit for brevity
+// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/817274f3280152ba2929a6067c93df8b34c4c9aa/types/json-schema/index.d.ts
+//
+// ==================================================================================================
+// JSON Schema Draft 07
+// ==================================================================================================
+// https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
+// --------------------------------------------------------------------------------------------------
+
+/**
+ * Primitive type
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
+ */
+export type JSONSchemaTypeName =
+  | ({} & string)
+  | 'string'
+  | 'number'
+  | 'integer'
+  | 'boolean'
+  | 'object'
+  | 'array'
+  | 'null';
+
+/**
+ * Primitive type
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1.1
+ */
+export type JSONSchemaType =
+  | string //
+  | number
+  | boolean
+  | JSONSchemaObject
+  | JSONSchemaArray
+  | null;
+
+// Workaround for infinite type recursion
+export interface JSONSchemaObject {
+  [key: string]: JSONSchemaType;
+}
+
+// Workaround for infinite type recursion
+// https://github.com/Microsoft/TypeScript/issues/3496#issuecomment-128553540
+export interface JSONSchemaArray extends Array<JSONSchemaType> {}
+
+/**
+ * Meta schema
+ *
+ * Recommended values:
+ * - 'http://json-schema.org/schema#'
+ * - 'http://json-schema.org/hyper-schema#'
+ * - 'http://json-schema.org/draft-07/schema#'
+ * - 'http://json-schema.org/draft-07/hyper-schema#'
+ *
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-5
+ */
+export type JSONSchemaVersion = string;
+
+/**
+ * JSON Schema v7
+ * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01
+ */
+export type JSONSchemaDefinition = JSONSchema | boolean;
+export interface JSONSchema {
+  $id?: string | undefined;
+  $comment?: string | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.1
+   */
+  type?: JSONSchemaTypeName | JSONSchemaTypeName[] | undefined;
+  enum?: JSONSchemaType[] | undefined;
+  const?: JSONSchemaType | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.2
+   */
+  multipleOf?: number | undefined;
+  maximum?: number | undefined;
+  exclusiveMaximum?: number | undefined;
+  minimum?: number | undefined;
+  exclusiveMinimum?: number | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.3
+   */
+  maxLength?: number | undefined;
+  minLength?: number | undefined;
+  pattern?: string | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.4
+   */
+  items?: JSONSchemaDefinition | JSONSchemaDefinition[] | undefined;
+  additionalItems?: JSONSchemaDefinition | undefined;
+  maxItems?: number | undefined;
+  minItems?: number | undefined;
+  uniqueItems?: boolean | undefined;
+  contains?: JSONSchemaDefinition | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.5
+   */
+  maxProperties?: number | undefined;
+  minProperties?: number | undefined;
+  required?: string[] | undefined;
+  properties?:
+    | {
+        [key: string]: JSONSchemaDefinition;
+      }
+    | undefined;
+  patternProperties?:
+    | {
+        [key: string]: JSONSchemaDefinition;
+      }
+    | undefined;
+  additionalProperties?: JSONSchemaDefinition | undefined;
+  propertyNames?: JSONSchemaDefinition | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.6
+   */
+  if?: JSONSchemaDefinition | undefined;
+  then?: JSONSchemaDefinition | undefined;
+  else?: JSONSchemaDefinition | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-6.7
+   */
+  allOf?: JSONSchemaDefinition[] | undefined;
+  anyOf?: JSONSchemaDefinition[] | undefined;
+  oneOf?: JSONSchemaDefinition[] | undefined;
+  not?: JSONSchemaDefinition | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-7
+   */
+  format?: string | undefined;
+
+  /**
+   * @see https://tools.ietf.org/html/draft-handrews-json-schema-validation-01#section-10
+   */
+  title?: string | undefined;
+  description?: string | undefined;
+  default?: JSONSchemaType | undefined;
+  readOnly?: boolean | undefined;
+  writeOnly?: boolean | undefined;
+  examples?: JSONSchemaType | undefined;
+}
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
new file mode 100644
index 000000000..e76f34c83
--- /dev/null
+++ b/src/resources/beta/beta.ts
@@ -0,0 +1,12 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import { APIResource } from 'openai/resource';
+import * as ChatAPI from 'openai/resources/beta/chat/chat';
+
+export class Beta extends APIResource {
+  chat: ChatAPI.Chat = new ChatAPI.Chat(this.client);
+}
+
+export namespace Beta {
+  export import Chat = ChatAPI.Chat;
+}
diff --git a/src/resources/beta/chat/chat.ts b/src/resources/beta/chat/chat.ts
new file mode 100644
index 000000000..c871fab2f
--- /dev/null
+++ b/src/resources/beta/chat/chat.ts
@@ -0,0 +1,12 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import { APIResource } from 'openai/resource';
+import * as CompletionsAPI from 'openai/resources/beta/chat/completions';
+
+export class Chat extends APIResource {
+  completions: CompletionsAPI.Completions = new CompletionsAPI.Completions(this.client);
+}
+
+export namespace Chat {
+  export import Completions = CompletionsAPI.Completions;
+}
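The stripped-down `JSONSchema` type above is what the `parameters` field of a runnable function is checked against. A small sketch of a schema literal that satisfies it (the property names are illustrative):

```ts
import { JSONSchema } from 'openai/lib/jsonschema';

// Mirrors the shape of a `parameters` object passed to runFunctions.
const weatherParams: JSONSchema = {
  type: 'object',
  properties: {
    location: { type: 'string', description: 'a city name' },
    units: { type: 'string', enum: ['celsius', 'fahrenheit'] },
  },
  required: ['location'],
  additionalProperties: false,
};
```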
diff --git a/src/resources/beta/chat/completions.ts b/src/resources/beta/chat/completions.ts
new file mode 100644
index 000000000..8bb23a789
--- /dev/null
+++ b/src/resources/beta/chat/completions.ts
@@ -0,0 +1,77 @@
+// File generated from our OpenAPI spec by Stainless.
+
+import * as Core from 'openai/core';
+import { APIResource } from 'openai/resource';
+import { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from 'openai/lib/ChatCompletionRunner';
+export { ChatCompletionRunner, ChatCompletionFunctionRunnerParams } from 'openai/lib/ChatCompletionRunner';
+import {
+  ChatCompletionStreamingRunner,
+  ChatCompletionStreamingFunctionRunnerParams,
+} from 'openai/lib/ChatCompletionStreamingRunner';
+export {
+  ChatCompletionStreamingRunner,
+  ChatCompletionStreamingFunctionRunnerParams,
+} from 'openai/lib/ChatCompletionStreamingRunner';
+import { BaseFunctionsArgs } from 'openai/lib/RunnableFunction';
+export {
+  RunnableFunction,
+  RunnableFunctions,
+  RunnableFunctionWithParse,
+  RunnableFunctionWithoutParse,
+  ParsingFunction,
+} from 'openai/lib/RunnableFunction';
+import { ChatCompletionStream } from 'openai/lib/ChatCompletionStream';
+import { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions';
+
+export class Completions extends APIResource {
+  /**
+   * A convenience helper for using function calls with the /chat/completions
+   * endpoint which automatically calls the JavaScript functions you provide and
+   * sends their results back to the /chat/completions endpoint, looping as long as
+   * the model requests function calls.
+   *
+   * For more details and examples, see
+   * [the docs](https://github.com/openai/openai-node#runFunctions)
+   */
+  runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
+    body: ChatCompletionFunctionRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions,
+  ): ChatCompletionRunner;
+  runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
+    body: ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions,
+  ): ChatCompletionStreamingRunner;
+  runFunctions<FunctionsArgs extends BaseFunctionsArgs>(
+    body:
+      | ChatCompletionFunctionRunnerParams<FunctionsArgs>
+      | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,
+    options?: Core.RequestOptions,
+  ): ChatCompletionRunner | ChatCompletionStreamingRunner {
+    if (body.stream) {
+      return ChatCompletionStreamingRunner.runFunctions(
+        this.client.chat.completions,
+        body as ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>,
+        options,
+      );
+    }
+    return ChatCompletionRunner.runFunctions(
+      this.client.chat.completions,
+      body as ChatCompletionFunctionRunnerParams<FunctionsArgs>,
+      options,
+    );
+  }
+
+  /**
+   * Creates a chat completion stream
+   */
+  stream(
+    body: Omit<ChatCompletionCreateParamsStreaming, 'stream'> & { stream?: true },
+    options?: Core.RequestOptions,
+  ): ChatCompletionStream {
+    return ChatCompletionStream.createChatCompletion(
+      this.client.chat.completions,
+      { ...body, stream: true },
+      options,
+    );
+  }
+}
diff --git a/src/resources/beta/chat/index.ts b/src/resources/beta/chat/index.ts
new file mode 100644
index 000000000..8d0ee40ae
--- /dev/null
+++ b/src/resources/beta/chat/index.ts
@@ -0,0 +1,4 @@
+// File generated from our OpenAPI spec by Stainless.
+
+export { Chat } from './chat';
+export { Completions } from './completions';
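The overload signatures above mean the `stream` flag picks the runner type at compile time: omitting it resolves to a `ChatCompletionRunner`, while `stream: true` resolves to a `ChatCompletionStreamingRunner`. A hedged sketch (the function is a trivial stand-in with no `parse`, so its args arrive as a raw string):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

const functions = [
  {
    function: (args: string) => 'sunny', // stand-in; args arrive as a raw JSON string
    parameters: { type: 'object', properties: {} },
    description: 'gets the weather',
  },
];

// Resolves to ChatCompletionRunner:
const blocking = client.beta.chat.completions.runFunctions({
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'How is the weather?' }],
  functions,
});

// Resolves to ChatCompletionStreamingRunner:
const streaming = client.beta.chat.completions.runFunctions({
  stream: true,
  model: 'gpt-3.5-turbo',
  messages: [{ role: 'user', content: 'How is the weather?' }],
  functions,
});
```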
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
new file mode 100644
index 000000000..9d8daa323
--- /dev/null
+++ b/src/resources/beta/index.ts
@@ -0,0 +1,4 @@
+// File generated from our OpenAPI spec by Stainless.
+
+export { Beta } from './beta';
+export { Chat } from './chat/index';
diff --git a/src/resources/index.ts b/src/resources/index.ts
index 16ea95f8b..3f2d78020 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -2,6 +2,7 @@
 export * from './chat/index';
 export { Audio } from './audio/audio';
+export { Beta } from './beta/beta';
 export {
   Completion,
   CompletionChoice,
diff --git a/src/streaming.ts b/src/streaming.ts
index 33f641888..4482b8d02 100644
--- a/src/streaming.ts
+++ b/src/streaming.ts
@@ -91,8 +91,10 @@ export class Stream<Item> implements AsyncIterable<Item> {
     return new Stream(iterator, controller);
   }
 
-  // Generates a Stream from a newline-separated ReadableStream where each item
-  // is a JSON Value.
+  /**
+   * Generates a Stream from a newline-separated ReadableStream
+   * where each item is a JSON value.
+   */
   static fromReadableStream(readableStream: ReadableStream, controller: AbortController) {
     let consumed = false;
@@ -140,6 +142,10 @@ export class Stream<Item> implements AsyncIterable<Item> {
     return this.iterator();
   }
 
+  /**
+   * Splits the stream into two streams which can be
+   * independently read from at different speeds.
+   */
   tee(): [Stream<Item>, Stream<Item>] {
     const left: Array<Promise<IteratorResult<Item>>> = [];
     const right: Array<Promise<IteratorResult<Item>>> = [];
@@ -164,8 +170,11 @@ export class Stream<Item> implements AsyncIterable<Item> {
     ];
   }
 
-  // Converts this stream to a newline-separated ReadableStream of JSON Stringified values in the stream
-  // which can be turned back into a Stream with Stream.fromReadableStream.
+  /**
+   * Converts this stream to a newline-separated ReadableStream of
+   * JSON stringified values in the stream
+   * which can be turned back into a Stream with `Stream.fromReadableStream()`.
+   */
   toReadableStream(): ReadableStream {
     const self = this;
     let iter: AsyncIterator<Item>;
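The new `tee()` doc comment is worth a concrete illustration: each branch buffers the results the other has not yet read, so two consumers can proceed at different speeds. A sketch using the raw streaming API (the prompt and the two consumers are illustrative):

```ts
import OpenAI from 'openai';

const client = new OpenAI();

async function main() {
  const stream = await client.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say this is a test' }],
    stream: true,
  });

  // Both branches see every chunk, independently of each other's pace.
  const [left, right] = stream.tee();

  const printed = (async () => {
    for await (const chunk of left) process.stdout.write(chunk.choices[0]?.delta?.content || '');
  })();

  const counted = (async () => {
    let n = 0;
    for await (const _ of right) n++;
    return n;
  })();

  await printed;
  console.log('\nchunks received:', await counted);
}

main();
```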