feat(api): add optional name argument + improve docs #569

Merged 1 commit on Dec 15, 2023
8 changes: 3 additions & 5 deletions src/lib/AbstractChatCompletionRunner.ts
@@ -6,7 +6,6 @@ import {
type ChatCompletionMessage,
type ChatCompletionMessageParam,
type ChatCompletionCreateParams,
- type ChatCompletionAssistantMessageParam,
type ChatCompletionTool,
} from 'openai/resources/chat/completions';
import { APIUserAbortError, OpenAIError } from 'openai/error';
@@ -90,7 +89,6 @@ export abstract class AbstractChatCompletionRunner<
}

protected _addMessage(message: ChatCompletionMessageParam, emit = true) {
- // @ts-expect-error this works around a bug in the Azure OpenAI API in which `content` is missing instead of null.
if (!('content' in message)) message.content = null;

this.messages.push(message);
@@ -217,7 +215,7 @@ export abstract class AbstractChatCompletionRunner<
}

#getFinalContent(): string | null {
- return this.#getFinalMessage().content;
+ return this.#getFinalMessage().content ?? null;
}

/**
@@ -229,12 +227,12 @@
return this.#getFinalContent();
}

- #getFinalMessage(): ChatCompletionAssistantMessageParam {
+ #getFinalMessage(): ChatCompletionMessage {
let i = this.messages.length;
while (i-- > 0) {
const message = this.messages[i];
if (isAssistantMessage(message)) {
- return message;
+ return { ...message, content: message.content ?? null };
}
}
throw new OpenAIError('stream ended without producing a ChatCompletionMessage with role=assistant');
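For context (not part of the diff): a minimal sketch of how the relaxed nullability surfaces to callers of the chat runner, assuming the v4 `openai` client. `finalContent()` now resolves to `string | null`, so a run whose final message carries no text yields `null` rather than an untyped hole:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const runner = openai.beta.chat.completions.stream({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Say hello' }],
  });

  // `content` is coalesced with `?? null`, so the declared
  // `string | null` return type holds even when `content` is omitted.
  const content: string | null = await runner.finalContent();
  console.log(content ?? '(no text content)');
}

main();
```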
4 changes: 3 additions & 1 deletion src/resources/audio/speech.ts
@@ -28,7 +28,9 @@ export interface SpeechCreateParams {

/**
* The voice to use when generating the audio. Supported voices are `alloy`,
- * `echo`, `fable`, `onyx`, `nova`, and `shimmer`.
+ * `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are
+ * available in the
+ * [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech/voice-options).
*/
voice: 'alloy' | 'echo' | 'fable' | 'onyx' | 'nova' | 'shimmer';

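Illustrative usage of the documented voices (this snippet is not part of the PR; it follows the standard text-to-speech call):

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  // Any of the six documented voices works here; `alloy` is one example.
  const speech = await openai.audio.speech.create({
    model: 'tts-1',
    voice: 'alloy',
    input: 'Today is a wonderful day to build something people love!',
  });

  // Write the binary response out as an MP3 file.
  fs.writeFileSync('speech.mp3', Buffer.from(await speech.arrayBuffer()));
}

main();
```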
91 changes: 56 additions & 35 deletions src/resources/chat/completions.ts
@@ -105,21 +105,28 @@ export namespace ChatCompletion {

export interface ChatCompletionAssistantMessageParam {
/**
- * The contents of the assistant message.
+ * The role of the messages author, in this case `assistant`.
*/
- content: string | null;
+ role: 'assistant';

/**
- * The role of the messages author, in this case `assistant`.
+ * The contents of the assistant message. Required unless `tool_calls` or
+ * `function_call` is specified.
*/
- role: 'assistant';
+ content?: string | null;

/**
* Deprecated and replaced by `tool_calls`. The name and arguments of a function
* that should be called, as generated by the model.
*/
function_call?: ChatCompletionAssistantMessageParam.FunctionCall;

+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
+
/**
* The tool calls generated by the model, such as function calls.
*/
@@ -309,7 +316,8 @@ export namespace ChatCompletionContentPartImage {
url: string;

/**
- * Specifies the detail level of the image.
+ * Specifies the detail level of the image. Learn more in the
+ * [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
*/
detail?: 'auto' | 'low' | 'high';
}
@@ -340,9 +348,9 @@ export interface ChatCompletionFunctionCallOption {

export interface ChatCompletionFunctionMessageParam {
/**
- * The return value from the function call, to return to the model.
+ * The contents of the function message.
*/
- content: string | null;
+ content: string;

/**
* The name of the function to call.
@@ -451,12 +459,12 @@ export namespace ChatCompletionMessageToolCall {
* function.
*/
export interface ChatCompletionNamedToolChoice {
- function?: ChatCompletionNamedToolChoice.Function;
+ function: ChatCompletionNamedToolChoice.Function;

/**
* The type of the tool. Currently, only `function` is supported.
*/
- type?: 'function';
+ type: 'function';
}

export namespace ChatCompletionNamedToolChoice {
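With both fields now required, a named tool choice has to be spelled out in full. A sketch, not part of the diff; the `get_weather` tool is hypothetical:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo-1106',
    messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
    tools: [
      {
        type: 'function',
        function: {
          name: 'get_weather', // hypothetical function, for illustration only
          description: 'Get the current weather for a city',
          parameters: {
            type: 'object',
            properties: { city: { type: 'string' } },
            required: ['city'],
          },
        },
      },
    ],
    // `type` and `function` are both required on ChatCompletionNamedToolChoice.
    tool_choice: { type: 'function', function: { name: 'get_weather' } },
  });

  console.log(completion.choices[0].message.tool_calls);
}

main();
```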
@@ -477,12 +485,18 @@ export interface ChatCompletionSystemMessageParam {
/**
* The contents of the system message.
*/
- content: string | null;
+ content: string;

/**
* The role of the messages author, in this case `system`.
*/
role: 'system';
+
+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
}

export interface ChatCompletionTool {
@@ -511,7 +525,7 @@ export interface ChatCompletionToolMessageParam {
/**
* The contents of the tool message.
*/
- content: string | null;
+ content: string;

/**
* The role of the messages author, in this case `tool`.
@@ -528,12 +542,18 @@ export interface ChatCompletionUserMessageParam {
/**
* The contents of the user message.
*/
- content: string | Array<ChatCompletionContentPart> | null;
+ content: string | Array<ChatCompletionContentPart>;

/**
* The role of the messages author, in this case `user`.
*/
role: 'user';
+
+ /**
+ * An optional name for the participant. Provides the model information to
+ * differentiate between participants of the same role.
+ */
+ name?: string;
}
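The headline feature of the PR: `name` is now accepted on system and user messages. A minimal sketch (not from the diff) of using it to distinguish two participants who share the same role:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [
      { role: 'system', content: 'You are moderating a debate between two users.' },
      // `name` gives the model information to tell same-role participants apart.
      { role: 'user', name: 'alice', content: 'Tabs are better than spaces.' },
      { role: 'user', name: 'bob', content: 'Spaces, obviously.' },
      { role: 'user', name: 'alice', content: 'Who made the stronger argument?' },
    ],
  });

  console.log(completion.choices[0].message.content);
}

main();
```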

/**
Expand Down Expand Up @@ -567,19 +587,19 @@ export interface ChatCompletionCreateParamsBase {
| 'gpt-4-32k'
| 'gpt-4-32k-0314'
| 'gpt-4-32k-0613'
+ | 'gpt-3.5-turbo-1106'
| 'gpt-3.5-turbo'
| 'gpt-3.5-turbo-16k'
| 'gpt-3.5-turbo-0301'
| 'gpt-3.5-turbo-0613'
- | 'gpt-3.5-turbo-1106'
| 'gpt-3.5-turbo-16k-0613';

/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on their
* existing frequency in the text so far, decreasing the model's likelihood to
* repeat the same line verbatim.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
*/
frequency_penalty?: number | null;

@@ -627,7 +647,9 @@ export interface ChatCompletionCreateParamsBase {
max_tokens?: number | null;

/**
- * How many chat completion choices to generate for each input message.
+ * How many chat completion choices to generate for each input message. Note that
+ * you will be charged based on the number of generated tokens across all of the
+ * choices. Keep `n` as `1` to minimize costs.
*/
n?: number | null;

@@ -636,7 +658,7 @@
* whether they appear in the text so far, increasing the model's likelihood to
* talk about new topics.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
*/
presence_penalty?: number | null;

@@ -649,10 +671,10 @@
* **Important:** when using JSON mode, you **must** also instruct the model to
* produce JSON yourself via a system or user message. Without this, the model may
* generate an unending stream of whitespace until the generation reaches the token
- * limit, resulting in increased latency and appearance of a "stuck" request. Also
- * note that the message content may be partially cut off if
- * `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
- * or the conversation exceeded the max context length.
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
*/
response_format?: ChatCompletionCreateParams.ResponseFormat;

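A sketch of the caveat described above (not part of the diff): when `response_format` requests JSON, the messages themselves must also ask for JSON:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo-1106',
    response_format: { type: 'json_object' },
    messages: [
      // Without an explicit instruction to produce JSON, the model may emit
      // whitespace until it hits the token limit and the request appears stuck.
      { role: 'system', content: 'Reply in JSON with keys "city" and "country".' },
      { role: 'user', content: 'Where is the Eiffel Tower?' },
    ],
  });

  console.log(completion.choices[0].message.content); // e.g. {"city":"Paris","country":"France"}
}

main();
```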
@@ -734,23 +756,22 @@ export namespace ChatCompletionCreateParams {
*/
name: string;

+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description?: string;
+
/**
* The parameters the functions accepts, described as a JSON Schema object. See the
- * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
- * examples, and the
+ * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
+ * for examples, and the
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
* documentation about the format.
*
- * To describe a function that accepts no parameters, provide the value
- * `{"type": "object", "properties": {}}`.
*/
- parameters: Shared.FunctionParameters;
-
- /**
- * A description of what the function does, used by the model to choose when and
- * how to call the function.
- */
+ * Omitting `parameters` defines a function with an empty parameter list.
*/
- description?: string;
+ parameters?: Shared.FunctionParameters;
}

/**
@@ -762,10 +783,10 @@ export namespace ChatCompletionCreateParams {
* **Important:** when using JSON mode, you **must** also instruct the model to
* produce JSON yourself via a system or user message. Without this, the model may
* generate an unending stream of whitespace until the generation reaches the token
- * limit, resulting in increased latency and appearance of a "stuck" request. Also
- * note that the message content may be partially cut off if
- * `finish_reason="length"`, which indicates the generation exceeded `max_tokens`
- * or the conversation exceeded the max context length.
+ * limit, resulting in a long-running and seemingly "stuck" request. Also note that
+ * the message content may be partially cut off if `finish_reason="length"`, which
+ * indicates the generation exceeded `max_tokens` or the conversation exceeded the
+ * max context length.
*/
export interface ResponseFormat {
/**
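Sketch of the now-optional `parameters` (not from the diff; `get_time` is a hypothetical function): per the updated doc comment, omitting the schema defines a function with an empty parameter list:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const completion = await openai.chat.completions.create({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'What time is it?' }],
    functions: [
      {
        name: 'get_time', // hypothetical function, for illustration only
        description: 'Returns the current server time as an ISO 8601 string',
        // `parameters` omitted: defines a function with an empty parameter list.
      },
    ],
  });

  console.log(completion.choices[0].message.function_call);
}

main();
```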
4 changes: 2 additions & 2 deletions src/resources/completions.ts
@@ -177,7 +177,7 @@ export interface CompletionCreateParamsBase {
* existing frequency in the text so far, decreasing the model's likelihood to
* repeat the same line verbatim.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
*/
frequency_penalty?: number | null;

@@ -232,7 +232,7 @@ export interface CompletionCreateParamsBase {
* whether they appear in the text so far, increasing the model's likelihood to
* talk about new topics.
*
- * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/gpt/parameter-details)
+ * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
*/
presence_penalty?: number | null;

3 changes: 2 additions & 1 deletion src/resources/embeddings.ts
@@ -82,7 +82,8 @@ export interface EmbeddingCreateParams {
* Input text to embed, encoded as a string or array of tokens. To embed multiple
* inputs in a single request, pass an array of strings or array of token arrays.
* The input must not exceed the max input tokens for the model (8192 tokens for
- * `text-embedding-ada-002`) and cannot be an empty string.
+ * `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048
+ * dimensions or less.
* [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
* for counting tokens.
*/
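Illustrative only (not in the diff): batching inputs as an array, which per the updated doc must stay at or under 2048 entries per request:

```ts
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const inputs = ['first document', 'second document']; // at most 2048 entries
  const { data } = await openai.embeddings.create({
    model: 'text-embedding-ada-002',
    input: inputs,
  });

  // One embedding per input; ada-002 vectors are 1536-dimensional.
  console.log(data.length, data[0].embedding.length);
}

main();
```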
6 changes: 3 additions & 3 deletions src/resources/files.ts
@@ -12,10 +12,10 @@ import { Page } from 'openai/pagination';

export class Files extends APIResource {
/**
- * Upload a file that can be used across various endpoints/features. The size of
- * all the files uploaded by one organization can be up to 100 GB.
+ * Upload a file that can be used across various endpoints. The size of all the
+ * files uploaded by one organization can be up to 100 GB.
*
- * The size of individual files for can be a maximum of 512MB. See the
+ * The size of individual files can be a maximum of 512 MB. See the
* [Assistants Tools guide](https://platform.openai.com/docs/assistants/tools) to
* learn more about the types of files supported. The Fine-tuning API only supports
* `.jsonl` files.
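For context (not part of the diff), the standard upload call the doc comment describes; fine-tuning files must be `.jsonl`, and individual files are capped at 512 MB:

```ts
import fs from 'node:fs';
import OpenAI from 'openai';

const openai = new OpenAI();

async function main() {
  const file = await openai.files.create({
    file: fs.createReadStream('training.jsonl'), // assumed local file
    purpose: 'fine-tune',
  });

  console.log(file.id);
}

main();
```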
28 changes: 13 additions & 15 deletions src/resources/shared.ts
@@ -7,33 +7,31 @@ export interface FunctionDefinition {
*/
name: string;

+ /**
+ * A description of what the function does, used by the model to choose when and
+ * how to call the function.
+ */
+ description?: string;
+
/**
* The parameters the functions accepts, described as a JSON Schema object. See the
- * [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
- * examples, and the
+ * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
+ * for examples, and the
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
* documentation about the format.
*
- * To describe a function that accepts no parameters, provide the value
- * `{"type": "object", "properties": {}}`.
+ * Omitting `parameters` defines a function with an empty parameter list.
*/
- parameters: FunctionParameters;
-
- /**
- * A description of what the function does, used by the model to choose when and
- * how to call the function.
- */
- description?: string;
+ parameters?: FunctionParameters;
}

/**
* The parameters the functions accepts, described as a JSON Schema object. See the
* [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for
* examples, and the
* [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
* for examples, and the
* [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
* documentation about the format.
*
* To describe a function that accepts no parameters, provide the value
* `{"type": "object", "properties": {}}`.
* Omitting `parameters` defines a function with an empty parameter list.
*/
export type FunctionParameters = Record<string, unknown>;
8 changes: 2 additions & 6 deletions tests/api-resources/beta/assistants/files.test.ts
@@ -10,9 +10,7 @@ const openai = new OpenAI({

describe('resource files', () => {
test('create: only required params', async () => {
- const responsePromise = openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
- file_id: 'string',
- });
+ const responsePromise = openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
const rawResponse = await responsePromise.asResponse();
expect(rawResponse).toBeInstanceOf(Response);
const response = await responsePromise;
@@ -23,9 +21,7 @@ describe('resource files', () => {
});

test('create: required and optional params', async () => {
- const response = await openai.beta.assistants.files.create('file-AF1WoRqd3aJAHsqc9NY7iL8F', {
- file_id: 'string',
- });
+ const response = await openai.beta.assistants.files.create('file-abc123', { file_id: 'string' });
});

test('retrieve', async () => {