diff --git a/.stats.yml b/.stats.yml
index e550e0bd..19e9daeb 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 10
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-73443ebfebee64b8ec0ebbacd2521d6b6aa900e9526ec97abdcbcff0c0955d9b.yml
+configured_endpoints: 19
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/anthropic-be055148d227480fcacc9086c37ac8009dcb487731069ada51af35044f65bee4.yml
diff --git a/api.md b/api.md
index f9b6cd2c..48d1c9a8 100644
--- a/api.md
+++ b/api.md
@@ -1,50 +1,103 @@
# Anthropic
+# Shared
+
+Types:
+
+- APIErrorObject
+- AuthenticationError
+- BillingError
+- ErrorObject
+- ErrorResponse
+- GatewayTimeoutError
+- InvalidRequestError
+- NotFoundError
+- OverloadedError
+- PermissionError
+- RateLimitError
+
# Messages
Types:
-- ContentBlock
-- ContentBlockDeltaEvent
-- ContentBlockParam
-- ContentBlockStartEvent
-- ContentBlockStopEvent
-- ImageBlockParam
-- InputJSONDelta
-- Message
-- MessageDeltaEvent
-- MessageDeltaUsage
-- MessageParam
-- MessageStartEvent
-- MessageStopEvent
-- MessageStreamEvent
-- Metadata
-- Model
-- RawContentBlockDeltaEvent
-- RawContentBlockStartEvent
-- RawContentBlockStopEvent
-- RawMessageDeltaEvent
-- RawMessageStartEvent
-- RawMessageStopEvent
-- RawMessageStreamEvent
-- TextBlock
-- TextBlockParam
-- TextDelta
-- Tool
-- ToolChoice
-- ToolChoiceAny
-- ToolChoiceAuto
-- ToolChoiceTool
-- ToolResultBlockParam
-- ToolUseBlock
-- ToolUseBlockParam
-- Usage
+- Base64PDFSource
+- CacheControlEphemeral
+- ContentBlock
+- ContentBlockDeltaEvent
+- ContentBlockParam
+- ContentBlockStartEvent
+- ContentBlockStopEvent
+- DocumentBlockParam
+- ImageBlockParam
+- InputJSONDelta
+- Message
+- MessageDeltaEvent
+- MessageDeltaUsage
+- MessageParam
+- MessageStartEvent
+- MessageStopEvent
+- MessageStreamEvent
+- MessageTokensCount
+- Metadata
+- Model
+- RawContentBlockDeltaEvent
+- RawContentBlockStartEvent
+- RawContentBlockStopEvent
+- RawMessageDeltaEvent
+- RawMessageStartEvent
+- RawMessageStopEvent
+- RawMessageStreamEvent
+- TextBlock
+- TextBlockParam
+- TextDelta
+- Tool
+- ToolChoice
+- ToolChoiceAny
+- ToolChoiceAuto
+- ToolChoiceTool
+- ToolResultBlockParam
+- ToolUseBlock
+- ToolUseBlockParam
+- Usage
Methods:
-- client.messages.create({ ...params }) -> Message
+- client.messages.create({ ...params }) -> Message
+- client.messages.countTokens({ ...params }) -> MessageTokensCount
- client.messages.stream(body, options?) -> MessageStream
+## Batches
+
+Types:
+
+- MessageBatch
+- MessageBatchCanceledResult
+- MessageBatchErroredResult
+- MessageBatchExpiredResult
+- MessageBatchIndividualResponse
+- MessageBatchRequestCounts
+- MessageBatchResult
+- MessageBatchSucceededResult
+
+Methods:
+
+- client.messages.batches.create({ ...params }) -> MessageBatch
+- client.messages.batches.retrieve(messageBatchId) -> MessageBatch
+- client.messages.batches.list({ ...params }) -> MessageBatchesPage
+- client.messages.batches.cancel(messageBatchId) -> MessageBatch
+- client.messages.batches.results(messageBatchId) -> Response
+
+# Models
+
+Types:
+
+- ModelInfo
+
+Methods:
+
+- client.models.retrieve(modelId) -> ModelInfo
+- client.models.list({ ...params }) -> ModelInfosPage
+
# Beta
Types:
@@ -52,14 +105,27 @@ Types:
- AnthropicBeta
- BetaAPIError
- BetaAuthenticationError
+- BetaBillingError
- BetaError
- BetaErrorResponse
+- BetaGatewayTimeoutError
- BetaInvalidRequestError
- BetaNotFoundError
- BetaOverloadedError
- BetaPermissionError
- BetaRateLimitError
+## Models
+
+Types:
+
+- BetaModelInfo
+
+Methods:
+
+- client.beta.models.retrieve(modelId) -> BetaModelInfo
+- client.beta.models.list({ ...params }) -> BetaModelInfosPage
+
## Messages
Types:
@@ -125,26 +191,3 @@ Methods:
- client.beta.messages.batches.list({ ...params }) -> BetaMessageBatchesPage
- client.beta.messages.batches.cancel(messageBatchId, { ...params }) -> BetaMessageBatch
- client.beta.messages.batches.results(messageBatchId, { ...params }) -> Response
-
-## PromptCaching
-
-### Messages
-
-Types:
-
-- PromptCachingBetaCacheControlEphemeral
-- PromptCachingBetaImageBlockParam
-- PromptCachingBetaMessage
-- PromptCachingBetaMessageParam
-- PromptCachingBetaTextBlockParam
-- PromptCachingBetaTool
-- PromptCachingBetaToolResultBlockParam
-- PromptCachingBetaToolUseBlockParam
-- PromptCachingBetaUsage
-- RawPromptCachingBetaMessageStartEvent
-- RawPromptCachingBetaMessageStreamEvent
-
-Methods:
-
-- client.beta.promptCaching.messages.create({ ...params }) -> PromptCachingBetaMessage
-- client.beta.promptCaching.messages.stream({ ...params }) -> PromptCachingBetaMessageStream
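Taken together, the api.md additions above introduce three new public surfaces: token counting on `client.messages`, the Message Batches sub-resource, and the Models API. A rough usage sketch follows (the model name and polling interval are illustrative assumptions, not prescribed by this diff):

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic(); // picks up ANTHROPIC_API_KEY from the environment

// Models API: list models, most recently released first.
for await (const model of client.models.list()) {
  console.log(model.id, model.display_name);
}

// Message Batches: create, poll until processing ends, then read results.
let batch = await client.messages.batches.create({
  requests: [
    {
      custom_id: 'greeting',
      params: {
        model: 'claude-3-5-sonnet-latest',
        max_tokens: 1024,
        messages: [{ role: 'user', content: 'Hello' }],
      },
    },
  ],
});
while (batch.processing_status === 'in_progress') {
  await new Promise((r) => setTimeout(r, 5_000));
  batch = await client.messages.batches.retrieve(batch.id);
}
for await (const entry of await client.messages.batches.results(batch.id)) {
  console.log(entry.custom_id, entry.result.type);
}
```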
diff --git a/examples/count-tokens.ts b/examples/count-tokens.ts
new file mode 100755
index 00000000..e69de29b
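`examples/count-tokens.ts` is added empty (`e69de29b` is git's empty-blob hash) but with the executable bit set. A plausible body, modeled on the shape of the SDK's other examples; the shebang and model name are assumptions, not part of this diff:

```ts
#!/usr/bin/env -S npm run tsn -T

import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

async function main() {
  // countTokens mirrors messages.create params but returns only a count.
  const tokens = await client.messages.countTokens({
    model: 'claude-3-5-sonnet-latest', // illustrative model name
    messages: [{ role: 'user', content: 'Hello, world' }],
  });
  console.log(tokens.input_tokens);
}

main();
```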
diff --git a/src/index.ts b/src/index.ts
index 563aa131..bfca4fc8 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -14,15 +14,35 @@ import {
CompletionCreateParamsStreaming,
Completions,
} from './resources/completions';
+import { ModelInfo, ModelInfosPage, ModelListParams, Models } from './resources/models';
import {
+ AnthropicBeta,
+ Beta,
+ BetaAPIError,
+ BetaAuthenticationError,
+ BetaBillingError,
+ BetaError,
+ BetaErrorResponse,
+ BetaGatewayTimeoutError,
+ BetaInvalidRequestError,
+ BetaNotFoundError,
+ BetaOverloadedError,
+ BetaPermissionError,
+ BetaRateLimitError,
+} from './resources/beta/beta';
+import {
+ Base64PDFSource,
+ CacheControlEphemeral,
ContentBlock,
ContentBlockDeltaEvent,
ContentBlockParam,
ContentBlockStartEvent,
ContentBlockStopEvent,
+ DocumentBlockParam,
ImageBlockParam,
InputJSONDelta,
Message,
+ MessageCountTokensParams,
MessageCreateParams,
MessageCreateParamsNonStreaming,
MessageCreateParamsStreaming,
@@ -33,6 +53,7 @@ import {
MessageStopEvent,
MessageStreamEvent,
MessageStreamParams,
+ MessageTokensCount,
Messages,
Metadata,
Model,
@@ -55,20 +76,7 @@ import {
ToolUseBlock,
ToolUseBlockParam,
Usage,
-} from './resources/messages';
-import {
- AnthropicBeta,
- Beta,
- BetaAPIError,
- BetaAuthenticationError,
- BetaError,
- BetaErrorResponse,
- BetaInvalidRequestError,
- BetaNotFoundError,
- BetaOverloadedError,
- BetaPermissionError,
- BetaRateLimitError,
-} from './resources/beta/beta';
+} from './resources/messages/messages';
export interface ClientOptions {
/**
@@ -202,6 +210,7 @@ export class Anthropic extends Core.APIClient {
completions: API.Completions = new API.Completions(this);
messages: API.Messages = new API.Messages(this);
+ models: API.Models = new API.Models(this);
beta: API.Beta = new API.Beta(this);
protected override defaultQuery(): Core.DefaultQuery | undefined {
@@ -292,6 +301,8 @@ export class Anthropic extends Core.APIClient {
Anthropic.Completions = Completions;
Anthropic.Messages = Messages;
+Anthropic.Models = Models;
+Anthropic.ModelInfosPage = ModelInfosPage;
Anthropic.Beta = Beta;
export declare namespace Anthropic {
export type RequestOptions = Core.RequestOptions;
@@ -309,11 +320,14 @@ export declare namespace Anthropic {
export {
Messages as Messages,
+ type Base64PDFSource as Base64PDFSource,
+ type CacheControlEphemeral as CacheControlEphemeral,
type ContentBlock as ContentBlock,
type ContentBlockDeltaEvent as ContentBlockDeltaEvent,
type ContentBlockParam as ContentBlockParam,
type ContentBlockStartEvent as ContentBlockStartEvent,
type ContentBlockStopEvent as ContentBlockStopEvent,
+ type DocumentBlockParam as DocumentBlockParam,
type ImageBlockParam as ImageBlockParam,
type InputJSONDelta as InputJSONDelta,
type Message as Message,
@@ -323,6 +337,7 @@ export declare namespace Anthropic {
type MessageStartEvent as MessageStartEvent,
type MessageStopEvent as MessageStopEvent,
type MessageStreamEvent as MessageStreamEvent,
+ type MessageTokensCount as MessageTokensCount,
type Metadata as Metadata,
type Model as Model,
type RawContentBlockDeltaEvent as RawContentBlockDeltaEvent,
@@ -348,6 +363,14 @@ export declare namespace Anthropic {
type MessageCreateParamsNonStreaming as MessageCreateParamsNonStreaming,
type MessageCreateParamsStreaming as MessageCreateParamsStreaming,
type MessageStreamParams as MessageStreamParams,
+ type MessageCountTokensParams as MessageCountTokensParams,
+ };
+
+ export {
+ Models as Models,
+ type ModelInfo as ModelInfo,
+ ModelInfosPage as ModelInfosPage,
+ type ModelListParams as ModelListParams,
};
export {
@@ -355,14 +378,28 @@ export declare namespace Anthropic {
type AnthropicBeta as AnthropicBeta,
type BetaAPIError as BetaAPIError,
type BetaAuthenticationError as BetaAuthenticationError,
+ type BetaBillingError as BetaBillingError,
type BetaError as BetaError,
type BetaErrorResponse as BetaErrorResponse,
+ type BetaGatewayTimeoutError as BetaGatewayTimeoutError,
type BetaInvalidRequestError as BetaInvalidRequestError,
type BetaNotFoundError as BetaNotFoundError,
type BetaOverloadedError as BetaOverloadedError,
type BetaPermissionError as BetaPermissionError,
type BetaRateLimitError as BetaRateLimitError,
};
+
+ export type APIErrorObject = API.APIErrorObject;
+ export type AuthenticationError = API.AuthenticationError;
+ export type BillingError = API.BillingError;
+ export type ErrorObject = API.ErrorObject;
+ export type ErrorResponse = API.ErrorResponse;
+ export type GatewayTimeoutError = API.GatewayTimeoutError;
+ export type InvalidRequestError = API.InvalidRequestError;
+ export type NotFoundError = API.NotFoundError;
+ export type OverloadedError = API.OverloadedError;
+ export type PermissionError = API.PermissionError;
+ export type RateLimitError = API.RateLimitError;
}
export const { HUMAN_PROMPT, AI_PROMPT } = Anthropic;
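With `src/index.ts` rewired, `models` hangs directly off the client and the shared error shapes are reachable from the root `Anthropic` namespace, so neither needs a deep import anymore. A type-level sketch of what this enables:

```ts
import Anthropic from '@anthropic-ai/sdk';

// Shared error types are now addressable as Anthropic.<Name>.
function isOverloaded(response: Anthropic.ErrorResponse): boolean {
  return response.error.type === 'overloaded_error';
}

const client = new Anthropic();
const page = await client.models.list({ limit: 5 });
console.log(page.data.map((m) => m.id));
```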
diff --git a/src/lib/PromptCachingBetaMessageStream.ts b/src/lib/PromptCachingBetaMessageStream.ts
deleted file mode 100644
index 0e742cba..00000000
--- a/src/lib/PromptCachingBetaMessageStream.ts
+++ /dev/null
@@ -1,579 +0,0 @@
-import * as Core from '@anthropic-ai/sdk/core';
-import { AnthropicError, APIUserAbortError } from '@anthropic-ai/sdk/error';
-import { type ContentBlock, type TextBlock } from '@anthropic-ai/sdk/resources/messages';
-import {
- Messages,
- type PromptCachingBetaMessage,
- type RawPromptCachingBetaMessageStreamEvent,
- type PromptCachingBetaMessageParam,
- type MessageCreateParams,
- type MessageCreateParamsBase,
-} from '@anthropic-ai/sdk/resources/beta/prompt-caching/messages';
-import { type ReadableStream } from '@anthropic-ai/sdk/_shims/index';
-import { Stream } from '@anthropic-ai/sdk/streaming';
-import { partialParse } from '../_vendor/partial-json-parser/parser';
-
-export interface PromptCachingBetaMessageStreamEvents {
- connect: () => void;
- streamEvent: (event: RawPromptCachingBetaMessageStreamEvent, snapshot: PromptCachingBetaMessage) => void;
- text: (textDelta: string, textSnapshot: string) => void;
- inputJson: (partialJson: string, jsonSnapshot: unknown) => void;
- message: (message: PromptCachingBetaMessage) => void;
- contentBlock: (content: ContentBlock) => void;
- finalPromptCachingBetaMessage: (message: PromptCachingBetaMessage) => void;
- error: (error: AnthropicError) => void;
- abort: (error: APIUserAbortError) => void;
- end: () => void;
-}
-
-type PromptCachingBetaMessageStreamEventListeners<Event extends keyof PromptCachingBetaMessageStreamEvents> =
- {
- listener: PromptCachingBetaMessageStreamEvents[Event];
- once?: boolean;
- }[];
-
-const JSON_BUF_PROPERTY = '__json_buf';
-
-export class PromptCachingBetaMessageStream implements AsyncIterable<RawPromptCachingBetaMessageStreamEvent> {
- messages: PromptCachingBetaMessageParam[] = [];
- receivedMessages: PromptCachingBetaMessage[] = [];
- #currentMessageSnapshot: PromptCachingBetaMessage | undefined;
-
- controller: AbortController = new AbortController();
-
- #connectedPromise: Promise<void>;
- #resolveConnectedPromise: () => void = () => {};
- #rejectConnectedPromise: (error: AnthropicError) => void = () => {};
-
- #endPromise: Promise<void>;
- #resolveEndPromise: () => void = () => {};
- #rejectEndPromise: (error: AnthropicError) => void = () => {};
-
- #listeners: {
- [Event in keyof PromptCachingBetaMessageStreamEvents]?: PromptCachingBetaMessageStreamEventListeners<Event>;
- } = {};
-
- #ended = false;
- #errored = false;
- #aborted = false;
- #catchingPromiseCreated = false;
-
- constructor() {
- this.#connectedPromise = new Promise<void>((resolve, reject) => {
- this.#resolveConnectedPromise = resolve;
- this.#rejectConnectedPromise = reject;
- });
-
- this.#endPromise = new Promise<void>((resolve, reject) => {
- this.#resolveEndPromise = resolve;
- this.#rejectEndPromise = reject;
- });
-
- // Don't let these promises cause unhandled rejection errors.
- // We will manually cause an unhandled rejection error later
- // if the user hasn't registered any error listener or called
- // any promise-returning method.
- this.#connectedPromise.catch(() => {});
- this.#endPromise.catch(() => {});
- }
-
- /**
- * Intended for use on the frontend, consuming a stream produced with
- * `.toReadableStream()` on the backend.
- *
- * Note that messages sent to the model do not appear in `.on('message')`
- * in this context.
- */
- static fromReadableStream(stream: ReadableStream): PromptCachingBetaMessageStream {
- const runner = new PromptCachingBetaMessageStream();
- runner._run(() => runner._fromReadableStream(stream));
- return runner;
- }
-
- static createMessage(
- messages: Messages,
- params: MessageCreateParamsBase,
- options?: Core.RequestOptions,
- ): PromptCachingBetaMessageStream {
- const runner = new PromptCachingBetaMessageStream();
- for (const message of params.messages) {
- runner._addPromptCachingBetaMessageParam(message);
- }
- runner._run(() =>
- runner._createPromptCachingBetaMessage(
- messages,
- { ...params, stream: true },
- { ...options, headers: { ...options?.headers, 'X-Stainless-Helper-Method': 'stream' } },
- ),
- );
- return runner;
- }
-
- protected _run(executor: () => Promise<any>) {
- executor().then(() => {
- this._emitFinal();
- this._emit('end');
- }, this.#handleError);
- }
-
- protected _addPromptCachingBetaMessageParam(message: PromptCachingBetaMessageParam) {
- this.messages.push(message);
- }
-
- protected _addPromptCachingBetaMessage(message: PromptCachingBetaMessage, emit = true) {
- this.receivedMessages.push(message);
- if (emit) {
- this._emit('message', message);
- }
- }
-
- protected async _createPromptCachingBetaMessage(
- messages: Messages,
- params: MessageCreateParams,
- options?: Core.RequestOptions,
- ): Promise<void> {
- const signal = options?.signal;
- if (signal) {
- if (signal.aborted) this.controller.abort();
- signal.addEventListener('abort', () => this.controller.abort());
- }
- this.#beginRequest();
- const stream = await messages.create(
- { ...params, stream: true },
- { ...options, signal: this.controller.signal },
- );
- this._connected();
- for await (const event of stream) {
- this.#addStreamEvent(event);
- }
- if (stream.controller.signal?.aborted) {
- throw new APIUserAbortError();
- }
- this.#endRequest();
- }
-
- protected _connected() {
- if (this.ended) return;
- this.#resolveConnectedPromise();
- this._emit('connect');
- }
-
- get ended(): boolean {
- return this.#ended;
- }
-
- get errored(): boolean {
- return this.#errored;
- }
-
- get aborted(): boolean {
- return this.#aborted;
- }
-
- abort() {
- this.controller.abort();
- }
-
- /**
- * Adds the listener function to the end of the listeners array for the event.
- * No checks are made to see if the listener has already been added. Multiple calls passing
- * the same combination of event and listener will result in the listener being added, and
- * called, multiple times.
- * @returns this PromptCachingBetaMessageStream, so that calls can be chained
- */
- on<Event extends keyof PromptCachingBetaMessageStreamEvents>(
- event: Event,
- listener: PromptCachingBetaMessageStreamEvents[Event],
- ): this {
- const listeners: PromptCachingBetaMessageStreamEventListeners<Event> =
- this.#listeners[event] || (this.#listeners[event] = []);
- listeners.push({ listener });
- return this;
- }
-
- /**
- * Removes the specified listener from the listener array for the event.
- * off() will remove, at most, one instance of a listener from the listener array. If any single
- * listener has been added multiple times to the listener array for the specified event, then
- * off() must be called multiple times to remove each instance.
- * @returns this PromptCachingBetaMessageStream, so that calls can be chained
- */
- off<Event extends keyof PromptCachingBetaMessageStreamEvents>(
- event: Event,
- listener: PromptCachingBetaMessageStreamEvents[Event],
- ): this {
- const listeners = this.#listeners[event];
- if (!listeners) return this;
- const index = listeners.findIndex((l) => l.listener === listener);
- if (index >= 0) listeners.splice(index, 1);
- return this;
- }
-
- /**
- * Adds a one-time listener function for the event. The next time the event is triggered,
- * this listener is removed and then invoked.
- * @returns this PromptCachingBetaMessageStream, so that calls can be chained
- */
- once<Event extends keyof PromptCachingBetaMessageStreamEvents>(
- event: Event,
- listener: PromptCachingBetaMessageStreamEvents[Event],
- ): this {
- const listeners: PromptCachingBetaMessageStreamEventListeners<Event> =
- this.#listeners[event] || (this.#listeners[event] = []);
- listeners.push({ listener, once: true });
- return this;
- }
-
- /**
- * This is similar to `.once()`, but returns a Promise that resolves the next time
- * the event is triggered, instead of calling a listener callback.
- * @returns a Promise that resolves the next time given event is triggered,
- * or rejects if an error is emitted. (If you request the 'error' event,
- * returns a promise that resolves with the error).
- *
- * Example:
- *
- * const message = await stream.emitted('message') // rejects if the stream errors
- */
- emitted<Event extends keyof PromptCachingBetaMessageStreamEvents>(
- event: Event,
- ): Promise<
- Parameters<PromptCachingBetaMessageStreamEvents[Event]> extends [infer Param] ? Param
- : Parameters<PromptCachingBetaMessageStreamEvents[Event]> extends [] ? void
- : Parameters<PromptCachingBetaMessageStreamEvents[Event]>
- > {
- return new Promise((resolve, reject) => {
- this.#catchingPromiseCreated = true;
- if (event !== 'error') this.once('error', reject);
- this.once(event, resolve as any);
- });
- }
-
- async done(): Promise<void> {
- this.#catchingPromiseCreated = true;
- await this.#endPromise;
- }
-
- get currentMessage(): PromptCachingBetaMessage | undefined {
- return this.#currentMessageSnapshot;
- }
-
- #getFinalMessage(): PromptCachingBetaMessage {
- if (this.receivedMessages.length === 0) {
- throw new AnthropicError(
- 'stream ended without producing a PromptCachingBetaMessage with role=assistant',
- );
- }
- return this.receivedMessages.at(-1)!;
- }
-
- /**
- * @returns a promise that resolves with the final assistant PromptCachingBetaMessage response,
- * or rejects if an error occurred or the stream ended prematurely without producing a PromptCachingBetaMessage.
- */
- async finalMessage(): Promise<PromptCachingBetaMessage> {
- await this.done();
- return this.#getFinalMessage();
- }
-
- #getFinalText(): string {
- if (this.receivedMessages.length === 0) {
- throw new AnthropicError(
- 'stream ended without producing a PromptCachingBetaMessage with role=assistant',
- );
- }
- const textBlocks = this.receivedMessages
- .at(-1)!
- .content.filter((block): block is TextBlock => block.type === 'text')
- .map((block) => block.text);
- if (textBlocks.length === 0) {
- throw new AnthropicError('stream ended without producing a content block with type=text');
- }
- return textBlocks.join(' ');
- }
-
- /**
- * @returns a promise that resolves with the final assistant PromptCachingBetaMessage's text response, with
- * multiple text blocks concatenated together if present.
- * Rejects if an error occurred or the stream ended prematurely without producing a PromptCachingBetaMessage.
- */
- async finalText(): Promise<string> {
- await this.done();
- return this.#getFinalText();
- }
-
- #handleError = (error: unknown) => {
- this.#errored = true;
- if (error instanceof Error && error.name === 'AbortError') {
- error = new APIUserAbortError();
- }
- if (error instanceof APIUserAbortError) {
- this.#aborted = true;
- return this._emit('abort', error);
- }
- if (error instanceof AnthropicError) {
- return this._emit('error', error);
- }
- if (error instanceof Error) {
- const anthropicError: AnthropicError = new AnthropicError(error.message);
- // @ts-ignore
- anthropicError.cause = error;
- return this._emit('error', anthropicError);
- }
- return this._emit('error', new AnthropicError(String(error)));
- };
-
- protected _emit<Event extends keyof PromptCachingBetaMessageStreamEvents>(
- event: Event,
- ...args: Parameters<PromptCachingBetaMessageStreamEvents[Event]>
- ) {
- // make sure we don't emit any PromptCachingBetaMessageStreamEvents after end
- if (this.#ended) return;
-
- if (event === 'end') {
- this.#ended = true;
- this.#resolveEndPromise();
- }
-
- const listeners: PromptCachingBetaMessageStreamEventListeners<Event> | undefined = this.#listeners[event];
- if (listeners) {
- this.#listeners[event] = listeners.filter((l) => !l.once) as any;
- listeners.forEach(({ listener }: any) => listener(...args));
- }
-
- if (event === 'abort') {
- const error = args[0] as APIUserAbortError;
- if (!this.#catchingPromiseCreated && !listeners?.length) {
- Promise.reject(error);
- }
- this.#rejectConnectedPromise(error);
- this.#rejectEndPromise(error);
- this._emit('end');
- return;
- }
-
- if (event === 'error') {
- // NOTE: _emit('error', error) should only be called from #handleError().
-
- const error = args[0] as AnthropicError;
- if (!this.#catchingPromiseCreated && !listeners?.length) {
- // Trigger an unhandled rejection if the user hasn't registered any error handlers.
- // If you are seeing stack traces here, make sure to handle errors via either:
- // - runner.on('error', () => ...)
- // - await runner.done()
- // - await runner.final...()
- // - etc.
- Promise.reject(error);
- }
- this.#rejectConnectedPromise(error);
- this.#rejectEndPromise(error);
- this._emit('end');
- }
- }
-
- protected _emitFinal() {
- const finalPromptCachingBetaMessage = this.receivedMessages.at(-1);
- if (finalPromptCachingBetaMessage) {
- this._emit('finalPromptCachingBetaMessage', this.#getFinalMessage());
- }
- }
-
- #beginRequest() {
- if (this.ended) return;
- this.#currentMessageSnapshot = undefined;
- }
- #addStreamEvent(event: RawPromptCachingBetaMessageStreamEvent) {
- if (this.ended) return;
- const messageSnapshot = this.#accumulateMessage(event);
- this._emit('streamEvent', event, messageSnapshot);
-
- switch (event.type) {
- case 'content_block_delta': {
- const content = messageSnapshot.content.at(-1)!;
- if (event.delta.type === 'text_delta' && content.type === 'text') {
- this._emit('text', event.delta.text, content.text || '');
- } else if (event.delta.type === 'input_json_delta' && content.type === 'tool_use') {
- if (content.input) {
- this._emit('inputJson', event.delta.partial_json, content.input);
- }
- }
- break;
- }
- case 'message_stop': {
- this._addPromptCachingBetaMessageParam(messageSnapshot);
- this._addPromptCachingBetaMessage(messageSnapshot, true);
- break;
- }
- case 'content_block_stop': {
- this._emit('contentBlock', messageSnapshot.content.at(-1)!);
- break;
- }
- case 'message_start': {
- this.#currentMessageSnapshot = messageSnapshot;
- break;
- }
- case 'content_block_start':
- case 'message_delta':
- break;
- }
- }
- #endRequest(): PromptCachingBetaMessage {
- if (this.ended) {
- throw new AnthropicError(`stream has ended, this shouldn't happen`);
- }
- const snapshot = this.#currentMessageSnapshot;
- if (!snapshot) {
- throw new AnthropicError(`request ended without sending any chunks`);
- }
- this.#currentMessageSnapshot = undefined;
- return snapshot;
- }
-
- protected async _fromReadableStream(
- readableStream: ReadableStream,
- options?: Core.RequestOptions,
- ): Promise<void> {
- const signal = options?.signal;
- if (signal) {
- if (signal.aborted) this.controller.abort();
- signal.addEventListener('abort', () => this.controller.abort());
- }
- this.#beginRequest();
- this._connected();
- const stream = Stream.fromReadableStream<RawPromptCachingBetaMessageStreamEvent>(
- readableStream,
- this.controller,
- );
- for await (const event of stream) {
- this.#addStreamEvent(event);
- }
- if (stream.controller.signal?.aborted) {
- throw new APIUserAbortError();
- }
- this.#endRequest();
- }
-
- /**
- * Mutates this.#currentMessageSnapshot with the current event. Accumulation across multiple messages must be
- * handled by the caller; this method will throw if you try to accumulate events for more than one
- * message.
- */
- #accumulateMessage(event: RawPromptCachingBetaMessageStreamEvent): PromptCachingBetaMessage {
- let snapshot = this.#currentMessageSnapshot;
-
- if (event.type === 'message_start') {
- if (snapshot) {
- throw new AnthropicError(`Unexpected event order, got ${event.type} before receiving "message_stop"`);
- }
- return event.message;
- }
-
- if (!snapshot) {
- throw new AnthropicError(`Unexpected event order, got ${event.type} before "message_start"`);
- }
-
- switch (event.type) {
- case 'message_stop':
- return snapshot;
- case 'message_delta':
- snapshot.stop_reason = event.delta.stop_reason;
- snapshot.stop_sequence = event.delta.stop_sequence;
- snapshot.usage.output_tokens = event.usage.output_tokens;
- return snapshot;
- case 'content_block_start':
- snapshot.content.push(event.content_block);
- return snapshot;
- case 'content_block_delta': {
- const snapshotContent = snapshot.content.at(event.index);
- if (snapshotContent?.type === 'text' && event.delta.type === 'text_delta') {
- snapshotContent.text += event.delta.text;
- } else if (snapshotContent?.type === 'tool_use' && event.delta.type === 'input_json_delta') {
- // we need to keep track of the raw JSON string as well so that we can
- // re-parse it for each delta, for now we just store it as an untyped
- // non-enumerable property on the snapshot
- let jsonBuf = (snapshotContent as any)[JSON_BUF_PROPERTY] || '';
- jsonBuf += event.delta.partial_json;
-
- Object.defineProperty(snapshotContent, JSON_BUF_PROPERTY, {
- value: jsonBuf,
- enumerable: false,
- writable: true,
- });
-
- if (jsonBuf) {
- snapshotContent.input = partialParse(jsonBuf);
- }
- }
- return snapshot;
- }
- case 'content_block_stop':
- return snapshot;
- }
- }
-
- [Symbol.asyncIterator](): AsyncIterator<RawPromptCachingBetaMessageStreamEvent> {
- const pushQueue: RawPromptCachingBetaMessageStreamEvent[] = [];
- const readQueue: {
- resolve: (chunk: RawPromptCachingBetaMessageStreamEvent | undefined) => void;
- reject: (error: unknown) => void;
- }[] = [];
- let done = false;
-
- this.on('streamEvent', (event) => {
- const reader = readQueue.shift();
- if (reader) {
- reader.resolve(event);
- } else {
- pushQueue.push(event);
- }
- });
-
- this.on('end', () => {
- done = true;
- for (const reader of readQueue) {
- reader.resolve(undefined);
- }
- readQueue.length = 0;
- });
-
- this.on('abort', (err) => {
- done = true;
- for (const reader of readQueue) {
- reader.reject(err);
- }
- readQueue.length = 0;
- });
-
- this.on('error', (err) => {
- done = true;
- for (const reader of readQueue) {
- reader.reject(err);
- }
- readQueue.length = 0;
- });
-
- return {
- next: async (): Promise<IteratorResult<RawPromptCachingBetaMessageStreamEvent>> => {
- if (!pushQueue.length) {
- if (done) {
- return { value: undefined, done: true };
- }
- return new Promise<RawPromptCachingBetaMessageStreamEvent | undefined>((resolve, reject) =>
- readQueue.push({ resolve, reject }),
- ).then((chunk) => (chunk ? { value: chunk, done: false } : { value: undefined, done: true }));
- }
- const chunk = pushQueue.shift()!;
- return { value: chunk, done: false };
- },
- return: async () => {
- this.abort();
- return { value: undefined, done: true };
- },
- };
- }
-
- toReadableStream(): ReadableStream {
- const stream = new Stream(this[Symbol.asyncIterator].bind(this), this.controller);
- return stream.toReadableStream();
- }
-}
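`PromptCachingBetaMessageStream` can be deleted because prompt caching has graduated out of the beta namespace: `cache_control` now lives on the standard block params (see the new `CacheControlEphemeral` export earlier in this diff), so the stable `client.messages.stream()` helper covers the same flow. A hedged migration sketch, with an illustrative model name:

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

// Before: client.beta.promptCaching.messages.stream({ ... })
// After: the stable helper, with cache_control on ordinary blocks.
const stream = client.messages.stream({
  model: 'claude-3-5-sonnet-latest',
  max_tokens: 1024,
  system: [
    { type: 'text', text: 'You are a helpful assistant.', cache_control: { type: 'ephemeral' } },
  ],
  messages: [{ role: 'user', content: 'Hello' }],
});
const message = await stream.finalMessage();
console.log(message.usage);
```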
diff --git a/src/resources/beta/beta.ts b/src/resources/beta/beta.ts
index ee3c6ca5..e29a187c 100644
--- a/src/resources/beta/beta.ts
+++ b/src/resources/beta/beta.ts
@@ -1,6 +1,8 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from '../../resource';
+import * as ModelsAPI from './models';
+import { BetaModelInfo, BetaModelInfosPage, ModelListParams, Models } from './models';
import * as MessagesAPI from './messages/messages';
import {
BetaBase64PDFBlock,
@@ -44,12 +46,10 @@ import {
MessageCreateParamsStreaming,
Messages,
} from './messages/messages';
-import * as PromptCachingAPI from './prompt-caching/prompt-caching';
-import { PromptCaching } from './prompt-caching/prompt-caching';
export class Beta extends APIResource {
+ models: ModelsAPI.Models = new ModelsAPI.Models(this._client);
messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client);
- promptCaching: PromptCachingAPI.PromptCaching = new PromptCachingAPI.PromptCaching(this._client);
}
export type AnthropicBeta =
@@ -72,12 +72,20 @@ export interface BetaAuthenticationError {
type: 'authentication_error';
}
+export interface BetaBillingError {
+ message: string;
+
+ type: 'billing_error';
+}
+
export type BetaError =
| BetaInvalidRequestError
| BetaAuthenticationError
+ | BetaBillingError
| BetaPermissionError
| BetaNotFoundError
| BetaRateLimitError
+ | BetaGatewayTimeoutError
| BetaAPIError
| BetaOverloadedError;
@@ -87,6 +95,12 @@ export interface BetaErrorResponse {
type: 'error';
}
+export interface BetaGatewayTimeoutError {
+ message: string;
+
+ type: 'timeout_error';
+}
+
export interface BetaInvalidRequestError {
message: string;
@@ -117,16 +131,19 @@ export interface BetaRateLimitError {
type: 'rate_limit_error';
}
+Beta.Models = Models;
+Beta.BetaModelInfosPage = BetaModelInfosPage;
Beta.Messages = Messages;
-Beta.PromptCaching = PromptCaching;
export declare namespace Beta {
export {
type AnthropicBeta as AnthropicBeta,
type BetaAPIError as BetaAPIError,
type BetaAuthenticationError as BetaAuthenticationError,
+ type BetaBillingError as BetaBillingError,
type BetaError as BetaError,
type BetaErrorResponse as BetaErrorResponse,
+ type BetaGatewayTimeoutError as BetaGatewayTimeoutError,
type BetaInvalidRequestError as BetaInvalidRequestError,
type BetaNotFoundError as BetaNotFoundError,
type BetaOverloadedError as BetaOverloadedError,
@@ -134,6 +151,13 @@ export declare namespace Beta {
type BetaRateLimitError as BetaRateLimitError,
};
+ export {
+ Models as Models,
+ type BetaModelInfo as BetaModelInfo,
+ BetaModelInfosPage as BetaModelInfosPage,
+ type ModelListParams as ModelListParams,
+ };
+
export {
Messages as Messages,
type BetaBase64PDFBlock as BetaBase64PDFBlock,
@@ -176,6 +200,4 @@ export declare namespace Beta {
type MessageCreateParamsStreaming as MessageCreateParamsStreaming,
type MessageCountTokensParams as MessageCountTokensParams,
};
-
- export { PromptCaching as PromptCaching };
}
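The `BetaError` union gains two members here, and narrowing on the `type` tag is how consumers tell them apart. A small sketch using the namespace re-exports declared above:

```ts
import Anthropic from '@anthropic-ai/sdk';

function describeBetaError(error: Anthropic.BetaError): string {
  switch (error.type) {
    case 'billing_error':
      return `billing: ${error.message}`;
    case 'timeout_error': // BetaGatewayTimeoutError carries the 'timeout_error' tag
      return `gateway timeout: ${error.message}`;
    default:
      return `${error.type}: ${error.message}`;
  }
}
```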
diff --git a/src/resources/beta/index.ts b/src/resources/beta/index.ts
index 6e2b0a89..a68f2327 100644
--- a/src/resources/beta/index.ts
+++ b/src/resources/beta/index.ts
@@ -5,14 +5,17 @@ export {
type AnthropicBeta,
type BetaAPIError,
type BetaAuthenticationError,
+ type BetaBillingError,
type BetaError,
type BetaErrorResponse,
+ type BetaGatewayTimeoutError,
type BetaInvalidRequestError,
type BetaNotFoundError,
type BetaOverloadedError,
type BetaPermissionError,
type BetaRateLimitError,
} from './beta';
+export { BetaModelInfosPage, Models, type BetaModelInfo, type ModelListParams } from './models';
export {
Messages,
type BetaBase64PDFBlock,
@@ -55,4 +58,3 @@ export {
type MessageCreateParamsStreaming,
type MessageCountTokensParams,
} from './messages/index';
-export { PromptCaching } from './prompt-caching/index';
diff --git a/src/resources/beta/messages/messages.ts b/src/resources/beta/messages/messages.ts
index 3f39ca3a..186a6c36 100644
--- a/src/resources/beta/messages/messages.ts
+++ b/src/resources/beta/messages/messages.ts
@@ -4,8 +4,8 @@ import { APIResource } from '../../../resource';
import { APIPromise } from '../../../core';
import * as Core from '../../../core';
import * as MessagesMessagesAPI from './messages';
-import * as MessagesAPI from '../../messages';
import * as BetaAPI from '../beta';
+import * as MessagesAPI from '../../messages/messages';
import * as BatchesAPI from './batches';
import {
BatchCancelParams,
diff --git a/src/resources/beta/models.ts b/src/resources/beta/models.ts
new file mode 100644
index 00000000..48036273
--- /dev/null
+++ b/src/resources/beta/models.ts
@@ -0,0 +1,78 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import { Page, type PageParams } from '../../pagination';
+
+export class Models extends APIResource {
+ /**
+ * Get a specific model.
+ *
+ * The Models API response can be used to determine information about a specific
+ * model or resolve a model alias to a model ID.
+ */
+ retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise<BetaModelInfo> {
+ return this._client.get(`/v1/models/${modelId}?beta=true`, options);
+ }
+
+ /**
+ * List available models.
+ *
+ * The Models API response can be used to determine which models are available for
+ * use in the API. More recently released models are listed first.
+ */
+ list(
+ query?: ModelListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<BetaModelInfosPage, BetaModelInfo>;
+ list(options?: Core.RequestOptions): Core.PagePromise<BetaModelInfosPage, BetaModelInfo>;
+ list(
+ query: ModelListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<BetaModelInfosPage, BetaModelInfo> {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this._client.getAPIList('/v1/models?beta=true', BetaModelInfosPage, { query, ...options });
+ }
+}
+
+export class BetaModelInfosPage extends Page<BetaModelInfo> {}
+
+export interface BetaModelInfo {
+ /**
+ * Unique model identifier.
+ */
+ id: string;
+
+ /**
+ * RFC 3339 datetime string representing the time at which the model was released.
+ * May be set to an epoch value if the release date is unknown.
+ */
+ created_at: string;
+
+ /**
+ * A human-readable name for the model.
+ */
+ display_name: string;
+
+ /**
+ * Object type.
+ *
+ * For Models, this is always `"model"`.
+ */
+ type: 'model';
+}
+
+export interface ModelListParams extends PageParams {}
+
+Models.BetaModelInfosPage = BetaModelInfosPage;
+
+export declare namespace Models {
+ export {
+ type BetaModelInfo as BetaModelInfo,
+ BetaModelInfosPage as BetaModelInfosPage,
+ type ModelListParams as ModelListParams,
+ };
+}
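Because `BetaModelInfosPage` extends the SDK's `Page`, the new resource participates in auto-pagination, and the `?beta=true` flag is appended by the resource itself, as the method bodies above show. A short usage sketch:

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

// for-await iterates items across page boundaries transparently.
for await (const modelInfo of client.beta.models.list({ limit: 20 })) {
  console.log(`${modelInfo.id}: ${modelInfo.display_name}`);
}
```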
diff --git a/src/resources/beta/prompt-caching/index.ts b/src/resources/beta/prompt-caching/index.ts
deleted file mode 100644
index 78b4e747..00000000
--- a/src/resources/beta/prompt-caching/index.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-export {
- Messages,
- type PromptCachingBetaCacheControlEphemeral,
- type PromptCachingBetaImageBlockParam,
- type PromptCachingBetaMessage,
- type PromptCachingBetaMessageParam,
- type PromptCachingBetaTextBlockParam,
- type PromptCachingBetaTool,
- type PromptCachingBetaToolResultBlockParam,
- type PromptCachingBetaToolUseBlockParam,
- type PromptCachingBetaUsage,
- type RawPromptCachingBetaMessageStartEvent,
- type RawPromptCachingBetaMessageStreamEvent,
- type MessageCreateParams,
- type MessageCreateParamsNonStreaming,
- type MessageCreateParamsStreaming,
-} from './messages';
-export { PromptCaching } from './prompt-caching';
diff --git a/src/resources/beta/prompt-caching/messages.ts b/src/resources/beta/prompt-caching/messages.ts
deleted file mode 100644
index 4ae7449b..00000000
--- a/src/resources/beta/prompt-caching/messages.ts
+++ /dev/null
@@ -1,642 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import { APIPromise } from '../../../core';
-import * as Core from '../../../core';
-import * as PromptCachingMessagesAPI from './messages';
-import * as MessagesAPI from '../../messages';
-import * as BetaAPI from '../beta';
-import { Stream } from '../../../streaming';
-import { PromptCachingBetaMessageStream } from '../../../lib/PromptCachingBetaMessageStream';
-
-export class Messages extends APIResource {
- /**
- * Send a structured list of input messages with text and/or image content, and the
- * model will generate the next message in the conversation.
- *
- * The Messages API can be used for either single queries or stateless multi-turn
- * conversations.
- */
- create(
- params: MessageCreateParamsNonStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<PromptCachingBetaMessage>;
- create(
- params: MessageCreateParamsStreaming,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent>>;
- create(
- params: MessageCreateParamsBase,
- options?: Core.RequestOptions,
- ): APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent> | PromptCachingBetaMessage>;
- create(
- params: MessageCreateParams,
- options?: Core.RequestOptions,
- ): APIPromise<PromptCachingBetaMessage> | APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent>> {
- const { betas, ...body } = params;
- return this._client.post('/v1/messages?beta=prompt_caching', {
- body,
- timeout: (this._client as any)._options.timeout ?? 600000,
- ...options,
- headers: {
- 'anthropic-beta': [...(betas ?? []), 'prompt-caching-2024-07-31'].toString(),
- ...options?.headers,
- },
- stream: params.stream ?? false,
- }) as APIPromise<PromptCachingBetaMessage> | APIPromise<Stream<RawPromptCachingBetaMessageStreamEvent>>;
- }
-
- /**
- * Create a Message stream
- */
- stream(body: MessageStreamParams, options?: Core.RequestOptions): PromptCachingBetaMessageStream {
- return PromptCachingBetaMessageStream.createMessage(this, body, options);
- }
-}
-
-export type MessageStreamParams = MessageCreateParamsBase;
-
-export interface PromptCachingBetaCacheControlEphemeral {
- type: 'ephemeral';
-}
-
-export interface PromptCachingBetaImageBlockParam {
- source: PromptCachingBetaImageBlockParam.Source;
-
- type: 'image';
-
- cache_control?: PromptCachingBetaCacheControlEphemeral | null;
-}
-
-export namespace PromptCachingBetaImageBlockParam {
- export interface Source {
- data: string;
-
- media_type: 'image/jpeg' | 'image/png' | 'image/gif' | 'image/webp';
-
- type: 'base64';
- }
-}
-
-export interface PromptCachingBetaMessage {
- /**
- * Unique object identifier.
- *
- * The format and length of IDs may change over time.
- */
- id: string;
-
- /**
- * Content generated by the model.
- *
- * This is an array of content blocks, each of which has a `type` that determines
- * its shape.
- *
- * Example:
- *
- * ```json
- * [{ "type": "text", "text": "Hi, I'm Claude." }]
- * ```
- *
- * If the request input `messages` ended with an `assistant` turn, then the
- * response `content` will continue directly from that last turn. You can use this
- * to constrain the model's output.
- *
- * For example, if the input `messages` were:
- *
- * ```json
- * [
- * {
- * "role": "user",
- * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- * },
- * { "role": "assistant", "content": "The best answer is (" }
- * ]
- * ```
- *
- * Then the response `content` might be:
- *
- * ```json
- * [{ "type": "text", "text": "B)" }]
- * ```
- */
- content: Array<MessagesAPI.ContentBlock>;
-
- /**
- * The model that will complete your prompt.\n\nSee
- * [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- * details and options.
- */
- model: MessagesAPI.Model;
-
- /**
- * Conversational role of the generated message.
- *
- * This will always be `"assistant"`.
- */
- role: 'assistant';
-
- /**
- * The reason that we stopped.
- *
- * This may be one of the following values:
- *
- * - `"end_turn"`: the model reached a natural stopping point
- * - `"max_tokens"`: we exceeded the requested `max_tokens` or the model's maximum
- * - `"stop_sequence"`: one of your provided custom `stop_sequences` was generated
- * - `"tool_use"`: the model invoked one or more tools
- *
- * In non-streaming mode this value is always non-null. In streaming mode, it is
- * null in the `message_start` event and non-null otherwise.
- */
- stop_reason: 'end_turn' | 'max_tokens' | 'stop_sequence' | 'tool_use' | null;
-
- /**
- * Which custom stop sequence was generated, if any.
- *
- * This value will be a non-null string if one of your custom stop sequences was
- * generated.
- */
- stop_sequence: string | null;
-
- /**
- * Object type.
- *
- * For Messages, this is always `"message"`.
- */
- type: 'message';
-
- /**
- * Billing and rate-limit usage.
- *
- * Anthropic's API bills and rate-limits by token counts, as tokens represent the
- * underlying cost to our systems.
- *
- * Under the hood, the API transforms requests into a format suitable for the
- * model. The model's output then goes through a parsing stage before becoming an
- * API response. As a result, the token counts in `usage` will not match one-to-one
- * with the exact visible content of an API request or response.
- *
- * For example, `output_tokens` will be non-zero, even for an empty string response
- * from Claude.
- */
- usage: PromptCachingBetaUsage;
-}
-
-export interface PromptCachingBetaMessageParam {
- content:
- | string
- | Array<
- | PromptCachingBetaTextBlockParam
- | PromptCachingBetaImageBlockParam
- | PromptCachingBetaToolUseBlockParam
- | PromptCachingBetaToolResultBlockParam
- >;
-
- role: 'user' | 'assistant';
-}
-
-export interface PromptCachingBetaTextBlockParam {
- text: string;
-
- type: 'text';
-
- cache_control?: PromptCachingBetaCacheControlEphemeral | null;
-}
-
-export interface PromptCachingBetaTool {
- /**
- * [JSON schema](https://json-schema.org/) for this tool's input.
- *
- * This defines the shape of the `input` that your tool accepts and that the model
- * will produce.
- */
- input_schema: PromptCachingBetaTool.InputSchema;
-
- /**
- * Name of the tool.
- *
- * This is how the tool will be called by the model and in tool_use blocks.
- */
- name: string;
-
- cache_control?: PromptCachingBetaCacheControlEphemeral | null;
-
- /**
- * Description of what this tool does.
- *
- * Tool descriptions should be as detailed as possible. The more information that
- * the model has about what the tool is and how to use it, the better it will
- * perform. You can use natural language descriptions to reinforce important
- * aspects of the tool input JSON schema.
- */
- description?: string;
-}
-
-export namespace PromptCachingBetaTool {
- /**
- * [JSON schema](https://json-schema.org/) for this tool's input.
- *
- * This defines the shape of the `input` that your tool accepts and that the model
- * will produce.
- */
- export interface InputSchema {
- type: 'object';
-
- properties?: unknown | null;
- [k: string]: unknown;
- }
-}
-
-export interface PromptCachingBetaToolResultBlockParam {
- tool_use_id: string;
-
- type: 'tool_result';
-
- cache_control?: PromptCachingBetaCacheControlEphemeral | null;
-
- content?: string | Array<PromptCachingBetaTextBlockParam | PromptCachingBetaImageBlockParam>;
-
- is_error?: boolean;
-}
-
-export interface PromptCachingBetaToolUseBlockParam {
- id: string;
-
- input: unknown;
-
- name: string;
-
- type: 'tool_use';
-
- cache_control?: PromptCachingBetaCacheControlEphemeral | null;
-}
-
-export interface PromptCachingBetaUsage {
- /**
- * The number of input tokens used to create the cache entry.
- */
- cache_creation_input_tokens: number | null;
-
- /**
- * The number of input tokens read from the cache.
- */
- cache_read_input_tokens: number | null;
-
- /**
- * The number of input tokens which were used.
- */
- input_tokens: number;
-
- /**
- * The number of output tokens which were used.
- */
- output_tokens: number;
-}
-
-export interface RawPromptCachingBetaMessageStartEvent {
- message: PromptCachingBetaMessage;
-
- type: 'message_start';
-}
-
-export type RawPromptCachingBetaMessageStreamEvent =
- | RawPromptCachingBetaMessageStartEvent
- | MessagesAPI.RawMessageDeltaEvent
- | MessagesAPI.RawMessageStopEvent
- | MessagesAPI.RawContentBlockStartEvent
- | MessagesAPI.RawContentBlockDeltaEvent
- | MessagesAPI.RawContentBlockStopEvent;
-
-export type MessageCreateParams = MessageCreateParamsNonStreaming | MessageCreateParamsStreaming;
-
-export interface MessageCreateParamsBase {
- /**
- * Body param: The maximum number of tokens to generate before stopping.
- *
- * Note that our models may stop _before_ reaching this maximum. This parameter
- * only specifies the absolute maximum number of tokens to generate.
- *
- * Different models have different maximum values for this parameter. See
- * [models](https://docs.anthropic.com/en/docs/models-overview) for details.
- */
- max_tokens: number;
-
- /**
- * Body param: Input messages.
- *
- * Our models are trained to operate on alternating `user` and `assistant`
- * conversational turns. When creating a new `Message`, you specify the prior
- * conversational turns with the `messages` parameter, and the model then generates
- * the next `Message` in the conversation. Consecutive `user` or `assistant` turns
- * in your request will be combined into a single turn.
- *
- * Each input message must be an object with a `role` and `content`. You can
- * specify a single `user`-role message, or you can include multiple `user` and
- * `assistant` messages.
- *
- * If the final message uses the `assistant` role, the response content will
- * continue immediately from the content in that message. This can be used to
- * constrain part of the model's response.
- *
- * Example with a single `user` message:
- *
- * ```json
- * [{ "role": "user", "content": "Hello, Claude" }]
- * ```
- *
- * Example with multiple conversational turns:
- *
- * ```json
- * [
- * { "role": "user", "content": "Hello there." },
- * { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
- * { "role": "user", "content": "Can you explain LLMs in plain English?" }
- * ]
- * ```
- *
- * Example with a partially-filled response from Claude:
- *
- * ```json
- * [
- * {
- * "role": "user",
- * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
- * },
- * { "role": "assistant", "content": "The best answer is (" }
- * ]
- * ```
- *
- * Each input message `content` may be either a single `string` or an array of
- * content blocks, where each block has a specific `type`. Using a `string` for
- * `content` is shorthand for an array of one content block of type `"text"`. The
- * following input messages are equivalent:
- *
- * ```json
- * { "role": "user", "content": "Hello, Claude" }
- * ```
- *
- * ```json
- * { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
- * ```
- *
- * Starting with Claude 3 models, you can also send image content blocks:
- *
- * ```json
- * {
- * "role": "user",
- * "content": [
- * {
- * "type": "image",
- * "source": {
- * "type": "base64",
- * "media_type": "image/jpeg",
- * "data": "/9j/4AAQSkZJRg..."
- * }
- * },
- * { "type": "text", "text": "What is in this image?" }
- * ]
- * }
- * ```
- *
- * We currently support the `base64` source type for images, and the `image/jpeg`,
- * `image/png`, `image/gif`, and `image/webp` media types.
- *
- * See [examples](https://docs.anthropic.com/en/api/messages-examples#vision) for
- * more input examples.
- *
- * Note that if you want to include a
- * [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- * the top-level `system` parameter — there is no `"system"` role for input
- * messages in the Messages API.
- */
- messages: Array<PromptCachingBetaMessageParam>;
-
- /**
- * Body param: The model that will complete your prompt.\n\nSee
- * [models](https://docs.anthropic.com/en/docs/models-overview) for additional
- * details and options.
- */
- model: MessagesAPI.Model;
-
- /**
- * Body param: An object describing metadata about the request.
- */
- metadata?: MessagesAPI.Metadata;
-
- /**
- * Body param: Custom text sequences that will cause the model to stop generating.
- *
- * Our models will normally stop when they have naturally completed their turn,
- * which will result in a response `stop_reason` of `"end_turn"`.
- *
- * If you want the model to stop generating when it encounters custom strings of
- * text, you can use the `stop_sequences` parameter. If the model encounters one of
- * the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
- * and the response `stop_sequence` value will contain the matched stop sequence.
- */
- stop_sequences?: Array<string>;
-
- /**
- * Body param: Whether to incrementally stream the response using server-sent
- * events.
- *
- * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- * details.
- */
- stream?: boolean;
-
- /**
- * Body param: System prompt.
- *
- * A system prompt is a way of providing context and instructions to Claude, such
- * as specifying a particular goal or role. See our
- * [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
- */
- system?: string | Array<PromptCachingBetaTextBlockParam>;
-
- /**
- * Body param: Amount of randomness injected into the response.
- *
- * Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
- * for analytical / multiple choice, and closer to `1.0` for creative and
- * generative tasks.
- *
- * Note that even with `temperature` of `0.0`, the results will not be fully
- * deterministic.
- */
- temperature?: number;
-
- /**
- * Body param: How the model should use the provided tools. The model can use a
- * specific tool, any available tool, or decide by itself.
- */
- tool_choice?: MessagesAPI.ToolChoice;
-
- /**
- * Body param: Definitions of tools that the model may use.
- *
- * If you include `tools` in your API request, the model may return `tool_use`
- * content blocks that represent the model's use of those tools. You can then run
- * those tools using the tool input generated by the model and then optionally
- * return results back to the model using `tool_result` content blocks.
- *
- * Each tool definition includes:
- *
- * - `name`: Name of the tool.
- * - `description`: Optional, but strongly-recommended description of the tool.
- * - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
- * shape that the model will produce in `tool_use` output content blocks.
- *
- * For example, if you defined `tools` as:
- *
- * ```json
- * [
- * {
- * "name": "get_stock_price",
- * "description": "Get the current stock price for a given ticker symbol.",
- * "input_schema": {
- * "type": "object",
- * "properties": {
- * "ticker": {
- * "type": "string",
- * "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
- * }
- * },
- * "required": ["ticker"]
- * }
- * }
- * ]
- * ```
- *
- * And then asked the model "What's the S&P 500 at today?", the model might produce
- * `tool_use` content blocks in the response like this:
- *
- * ```json
- * [
- * {
- * "type": "tool_use",
- * "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- * "name": "get_stock_price",
- * "input": { "ticker": "^GSPC" }
- * }
- * ]
- * ```
- *
- * You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
- * input, and return the following back to the model in a subsequent `user`
- * message:
- *
- * ```json
- * [
- * {
- * "type": "tool_result",
- * "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
- * "content": "259.75 USD"
- * }
- * ]
- * ```
- *
- * Tools can be used for workflows that include running client-side tools and
- * functions, or more generally whenever you want the model to produce a particular
- * JSON structure of output.
- *
- * See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
- */
- tools?: Array<PromptCachingBetaTool>;
-
- /**
- * Body param: Only sample from the top K options for each subsequent token.
- *
- * Used to remove "long tail" low probability responses.
- * [Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
- *
- * Recommended for advanced use cases only. You usually only need to use
- * `temperature`.
- */
- top_k?: number;
-
- /**
- * Body param: Use nucleus sampling.
- *
- * In nucleus sampling, we compute the cumulative distribution over all the options
- * for each subsequent token in decreasing probability order and cut it off once it
- * reaches a particular probability specified by `top_p`. You should either alter
- * `temperature` or `top_p`, but not both.
- *
- * Recommended for advanced use cases only. You usually only need to use
- * `temperature`.
- */
- top_p?: number;
-
- /**
- * Header param: Optional header to specify the beta version(s) you want to use.
- */
- betas?: Array<BetaAPI.AnthropicBeta>;
-}
-
-export namespace MessageCreateParams {
- /**
- * @deprecated use `Anthropic.Messages.Metadata` instead
- */
- export type Metadata = MessagesAPI.Metadata;
-
- /**
- * @deprecated use `Anthropic.Messages.ToolChoiceAuto` instead
- */
- export type ToolChoiceAuto = MessagesAPI.ToolChoiceAuto;
-
- /**
- * @deprecated use `Anthropic.Messages.ToolChoiceAny` instead
- */
- export type ToolChoiceAny = MessagesAPI.ToolChoiceAny;
-
- /**
- * @deprecated use `Anthropic.Messages.ToolChoiceTool` instead
- */
- export type ToolChoiceTool = MessagesAPI.ToolChoiceTool;
-
- export type MessageCreateParamsNonStreaming = PromptCachingMessagesAPI.MessageCreateParamsNonStreaming;
- export type MessageCreateParamsStreaming = PromptCachingMessagesAPI.MessageCreateParamsStreaming;
-}
-
-export interface MessageCreateParamsNonStreaming extends MessageCreateParamsBase {
- /**
- * Body param: Whether to incrementally stream the response using server-sent
- * events.
- *
- * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- * details.
- */
- stream?: false;
-}
-
-export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
- /**
- * Body param: Whether to incrementally stream the response using server-sent
- * events.
- *
- * See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- * details.
- */
- stream: true;
-}
-
-export declare namespace Messages {
- export {
- type PromptCachingBetaCacheControlEphemeral as PromptCachingBetaCacheControlEphemeral,
- type PromptCachingBetaImageBlockParam as PromptCachingBetaImageBlockParam,
- type PromptCachingBetaMessage as PromptCachingBetaMessage,
- type PromptCachingBetaMessageParam as PromptCachingBetaMessageParam,
- type PromptCachingBetaTextBlockParam as PromptCachingBetaTextBlockParam,
- type PromptCachingBetaTool as PromptCachingBetaTool,
- type PromptCachingBetaToolResultBlockParam as PromptCachingBetaToolResultBlockParam,
- type PromptCachingBetaToolUseBlockParam as PromptCachingBetaToolUseBlockParam,
- type PromptCachingBetaUsage as PromptCachingBetaUsage,
- type RawPromptCachingBetaMessageStartEvent as RawPromptCachingBetaMessageStartEvent,
- type RawPromptCachingBetaMessageStreamEvent as RawPromptCachingBetaMessageStreamEvent,
- type MessageCreateParams as MessageCreateParams,
- type MessageCreateParamsNonStreaming as MessageCreateParamsNonStreaming,
- type MessageCreateParamsStreaming as MessageCreateParamsStreaming,
- };
-}
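Each `PromptCachingBeta*` shape deleted here maps onto a stable type: `cache_control` becomes an optional field on the ordinary params (`TextBlockParam`, `Tool`, and so on), and the cache counters from `PromptCachingBetaUsage` appear to fold into the stable `Usage`. A type-level sketch of the replacement, assuming the stable `Tool` now accepts `cache_control` as the rest of this diff indicates:

```ts
import Anthropic from '@anthropic-ai/sdk';

// Formerly PromptCachingBetaTool; now the stable Tool with cache_control.
const cachedTool: Anthropic.Tool = {
  name: 'get_time',
  description: 'Get the current time in a given time zone.',
  input_schema: {
    type: 'object',
    properties: { timezone: { type: 'string' } },
  },
  cache_control: { type: 'ephemeral' },
};
```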
diff --git a/src/resources/beta/prompt-caching/prompt-caching.ts b/src/resources/beta/prompt-caching/prompt-caching.ts
deleted file mode 100644
index 421f8621..00000000
--- a/src/resources/beta/prompt-caching/prompt-caching.ts
+++ /dev/null
@@ -1,47 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import { APIResource } from '../../../resource';
-import * as MessagesAPI from './messages';
-import {
- MessageCreateParams,
- MessageCreateParamsNonStreaming,
- MessageCreateParamsStreaming,
- Messages,
- PromptCachingBetaCacheControlEphemeral,
- PromptCachingBetaImageBlockParam,
- PromptCachingBetaMessage,
- PromptCachingBetaMessageParam,
- PromptCachingBetaTextBlockParam,
- PromptCachingBetaTool,
- PromptCachingBetaToolResultBlockParam,
- PromptCachingBetaToolUseBlockParam,
- PromptCachingBetaUsage,
- RawPromptCachingBetaMessageStartEvent,
- RawPromptCachingBetaMessageStreamEvent,
-} from './messages';
-
-export class PromptCaching extends APIResource {
- messages: MessagesAPI.Messages = new MessagesAPI.Messages(this._client);
-}
-
-PromptCaching.Messages = Messages;
-
-export declare namespace PromptCaching {
- export {
- Messages as Messages,
- type PromptCachingBetaCacheControlEphemeral as PromptCachingBetaCacheControlEphemeral,
- type PromptCachingBetaImageBlockParam as PromptCachingBetaImageBlockParam,
- type PromptCachingBetaMessage as PromptCachingBetaMessage,
- type PromptCachingBetaMessageParam as PromptCachingBetaMessageParam,
- type PromptCachingBetaTextBlockParam as PromptCachingBetaTextBlockParam,
- type PromptCachingBetaTool as PromptCachingBetaTool,
- type PromptCachingBetaToolResultBlockParam as PromptCachingBetaToolResultBlockParam,
- type PromptCachingBetaToolUseBlockParam as PromptCachingBetaToolUseBlockParam,
- type PromptCachingBetaUsage as PromptCachingBetaUsage,
- type RawPromptCachingBetaMessageStartEvent as RawPromptCachingBetaMessageStartEvent,
- type RawPromptCachingBetaMessageStreamEvent as RawPromptCachingBetaMessageStreamEvent,
- type MessageCreateParams as MessageCreateParams,
- type MessageCreateParamsNonStreaming as MessageCreateParamsNonStreaming,
- type MessageCreateParamsStreaming as MessageCreateParamsStreaming,
- };
-}
diff --git a/src/resources/completions.ts b/src/resources/completions.ts
index a2ef4d98..2260681d 100644
--- a/src/resources/completions.ts
+++ b/src/resources/completions.ts
@@ -4,7 +4,7 @@ import { APIResource } from '../resource';
import { APIPromise } from '../core';
import * as Core from '../core';
import * as CompletionsAPI from './completions';
-import * as MessagesAPI from './messages';
+import * as MessagesAPI from './messages/messages';
import { Stream } from '../streaming';
export class Completions extends APIResource {
diff --git a/src/resources/index.ts b/src/resources/index.ts
index da3136a7..23366973 100644
--- a/src/resources/index.ts
+++ b/src/resources/index.ts
@@ -1,12 +1,15 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+export * from './shared';
export {
Beta,
type AnthropicBeta,
type BetaAPIError,
type BetaAuthenticationError,
+ type BetaBillingError,
type BetaError,
type BetaErrorResponse,
+ type BetaGatewayTimeoutError,
type BetaInvalidRequestError,
type BetaNotFoundError,
type BetaOverloadedError,
@@ -22,11 +25,14 @@ export {
} from './completions';
export {
Messages,
+ type Base64PDFSource,
+ type CacheControlEphemeral,
type ContentBlock,
type ContentBlockDeltaEvent,
type ContentBlockParam,
type ContentBlockStartEvent,
type ContentBlockStopEvent,
+ type DocumentBlockParam,
type ImageBlockParam,
type InputJsonDelta,
type InputJSONDelta,
@@ -38,6 +44,7 @@ export {
type MessageStopEvent,
type MessageStreamEvent,
type MessageStreamParams,
+ type MessageTokensCount,
type Metadata,
type Model,
type RawContentBlockDeltaEvent,
@@ -62,4 +69,6 @@ export {
type MessageCreateParams,
type MessageCreateParamsNonStreaming,
type MessageCreateParamsStreaming,
-} from './messages';
+ type MessageCountTokensParams,
+} from './messages/messages';
+export { ModelInfosPage, Models, type ModelInfo, type ModelListParams } from './models';
diff --git a/src/resources/messages/batches.ts b/src/resources/messages/batches.ts
new file mode 100644
index 00000000..b4fd45e8
--- /dev/null
+++ b/src/resources/messages/batches.ts
@@ -0,0 +1,298 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../../resource';
+import { isRequestOptions } from '../../core';
+import * as Core from '../../core';
+import * as Shared from '../shared';
+import * as MessagesAPI from './messages';
+import { Page, type PageParams } from '../../pagination';
+import { JSONLDecoder } from '../../internal/decoders/jsonl';
+import { AnthropicError } from '../../error';
+
+export class Batches extends APIResource {
+ /**
+ * Send a batch of Message creation requests.
+ *
+ * The Message Batches API can be used to process multiple Messages API requests at
+ * once. Once a Message Batch is created, it begins processing immediately. Batches
+ * can take up to 24 hours to complete.
+ */
+ create(body: BatchCreateParams, options?: Core.RequestOptions): Core.APIPromise<MessageBatch> {
+ return this._client.post('/v1/messages/batches', { body, ...options });
+ }
+
+ /**
+ * This endpoint is idempotent and can be used to poll for Message Batch
+ * completion. To access the results of a Message Batch, make a request to the
+ * `results_url` field in the response.
+ */
+ retrieve(messageBatchId: string, options?: Core.RequestOptions): Core.APIPromise<MessageBatch> {
+ return this._client.get(`/v1/messages/batches/${messageBatchId}`, options);
+ }
+
+ /**
+ * List all Message Batches within a Workspace. Most recently created batches are
+ * returned first.
+ */
+ list(
+ query?: BatchListParams,
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<MessageBatchesPage, MessageBatch>;
+ list(options?: Core.RequestOptions): Core.PagePromise<MessageBatchesPage, MessageBatch>;
+ list(
+ query: BatchListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<MessageBatchesPage, MessageBatch> {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this._client.getAPIList('/v1/messages/batches', MessageBatchesPage, { query, ...options });
+ }
+
+ /**
+ * Batches may be canceled any time before processing ends. Once cancellation is
+ * initiated, the batch enters a `canceling` state, at which time the system may
+ * complete any in-progress, non-interruptible requests before finalizing
+ * cancellation.
+ *
+ * The number of canceled requests is specified in `request_counts`. To determine
+ * which requests were canceled, check the individual results within the batch.
+ * Note that cancellation may not result in any canceled requests if they were
+ * non-interruptible.
+ */
+ cancel(messageBatchId: string, options?: Core.RequestOptions): Core.APIPromise<MessageBatch> {
+ return this._client.post(`/v1/messages/batches/${messageBatchId}/cancel`, options);
+ }
+
+ /**
+ * Streams the results of a Message Batch as a `.jsonl` file.
+ *
+ * Each line in the file is a JSON object containing the result of a single request
+ * in the Message Batch. Results are not guaranteed to be in the same order as
+ * requests. Use the `custom_id` field to match results to requests.
+ */
+ async results(
+ messageBatchId: string,
+ options?: Core.RequestOptions,
+ ): Promise<JSONLDecoder<MessageBatchIndividualResponse>> {
+ const batch = await this.retrieve(messageBatchId);
+ if (!batch.results_url) {
+ throw new AnthropicError(
+ `No batch \`results_url\`; Has it finished processing? ${batch.processing_status} - ${batch.id}`,
+ );
+ }
+
+ return this._client
+ .get(batch.results_url, { ...options, __binaryResponse: true })
+ ._thenUnwrap((_, props) => JSONLDecoder.fromResponse<MessageBatchIndividualResponse>(props.response, props.controller));
+ }
+}
+
+export class MessageBatchesPage extends Page<MessageBatch> {}
+
+export interface MessageBatch {
+ /**
+ * Unique object identifier.
+ *
+ * The format and length of IDs may change over time.
+ */
+ id: string;
+
+ /**
+ * RFC 3339 datetime string representing the time at which the Message Batch was
+ * archived and its results became unavailable.
+ */
+ archived_at: string | null;
+
+ /**
+ * RFC 3339 datetime string representing the time at which cancellation was
+ * initiated for the Message Batch. Specified only if cancellation was initiated.
+ */
+ cancel_initiated_at: string | null;
+
+ /**
+ * RFC 3339 datetime string representing the time at which the Message Batch was
+ * created.
+ */
+ created_at: string;
+
+ /**
+ * RFC 3339 datetime string representing the time at which processing for the
+ * Message Batch ended. Specified only once processing ends.
+ *
+ * Processing ends when every request in a Message Batch has either succeeded,
+ * errored, canceled, or expired.
+ */
+ ended_at: string | null;
+
+ /**
+ * RFC 3339 datetime string representing the time at which the Message Batch will
+ * expire and end processing, which is 24 hours after creation.
+ */
+ expires_at: string;
+
+ /**
+ * Processing status of the Message Batch.
+ */
+ processing_status: 'in_progress' | 'canceling' | 'ended';
+
+ /**
+ * Tallies requests within the Message Batch, categorized by their status.
+ *
+ * Requests start as `processing` and move to one of the other statuses only once
+ * processing of the entire batch ends. The sum of all values always matches the
+ * total number of requests in the batch.
+ */
+ request_counts: MessageBatchRequestCounts;
+
+ /**
+ * URL to a `.jsonl` file containing the results of the Message Batch requests.
+ * Specified only once processing ends.
+ *
+ * Results in the file are not guaranteed to be in the same order as requests. Use
+ * the `custom_id` field to match results to requests.
+ */
+ results_url: string | null;
+
+ /**
+ * Object type.
+ *
+ * For Message Batches, this is always `"message_batch"`.
+ */
+ type: 'message_batch';
+}
+
+export interface MessageBatchCanceledResult {
+ type: 'canceled';
+}
+
+export interface MessageBatchErroredResult {
+ error: Shared.ErrorResponse;
+
+ type: 'errored';
+}
+
+export interface MessageBatchExpiredResult {
+ type: 'expired';
+}
+
+export interface MessageBatchIndividualResponse {
+ /**
+ * Developer-provided ID created for each request in a Message Batch. Useful for
+ * matching results to requests, as results may be given out of request order.
+ *
+ * Must be unique for each request within the Message Batch.
+ */
+ custom_id: string;
+
+ /**
+ * Processing result for this request.
+ *
+ * Contains a Message output if processing was successful, an error response if
+ * processing failed, or the reason why processing was not attempted, such as
+ * cancellation or expiration.
+ */
+ result: MessageBatchResult;
+}
+
+export interface MessageBatchRequestCounts {
+ /**
+ * Number of requests in the Message Batch that have been canceled.
+ *
+ * This is zero until processing of the entire Message Batch has ended.
+ */
+ canceled: number;
+
+ /**
+ * Number of requests in the Message Batch that encountered an error.
+ *
+ * This is zero until processing of the entire Message Batch has ended.
+ */
+ errored: number;
+
+ /**
+ * Number of requests in the Message Batch that have expired.
+ *
+ * This is zero until processing of the entire Message Batch has ended.
+ */
+ expired: number;
+
+ /**
+ * Number of requests in the Message Batch that are processing.
+ */
+ processing: number;
+
+ /**
+ * Number of requests in the Message Batch that have completed successfully.
+ *
+ * This is zero until processing of the entire Message Batch has ended.
+ */
+ succeeded: number;
+}
+
+/**
+ * Processing result for this request.
+ *
+ * Contains a Message output if processing was successful, an error response if
+ * processing failed, or the reason why processing was not attempted, such as
+ * cancellation or expiration.
+ */
+export type MessageBatchResult =
+ | MessageBatchSucceededResult
+ | MessageBatchErroredResult
+ | MessageBatchCanceledResult
+ | MessageBatchExpiredResult;
+
+export interface MessageBatchSucceededResult {
+ message: MessagesAPI.Message;
+
+ type: 'succeeded';
+}
+
+export interface BatchCreateParams {
+ /**
+ * List of requests for prompt completion. Each is an individual request to create
+ * a Message.
+ */
+ requests: Array<BatchCreateParams.Request>;
+}
+
+export namespace BatchCreateParams {
+ export interface Request {
+ /**
+ * Developer-provided ID created for each request in a Message Batch. Useful for
+ * matching results to requests, as results may be given out of request order.
+ *
+ * Must be unique for each request within the Message Batch.
+ */
+ custom_id: string;
+
+ /**
+ * Messages API creation parameters for the individual request.
+ *
+ * See the [Messages API reference](/en/api/messages) for full documentation on
+ * available parameters.
+ */
+ params: MessagesAPI.MessageCreateParamsNonStreaming;
+ }
+}
+
+export interface BatchListParams extends PageParams {}
+
+Batches.MessageBatchesPage = MessageBatchesPage;
+
+export declare namespace Batches {
+ export {
+ type MessageBatch as MessageBatch,
+ type MessageBatchCanceledResult as MessageBatchCanceledResult,
+ type MessageBatchErroredResult as MessageBatchErroredResult,
+ type MessageBatchExpiredResult as MessageBatchExpiredResult,
+ type MessageBatchIndividualResponse as MessageBatchIndividualResponse,
+ type MessageBatchRequestCounts as MessageBatchRequestCounts,
+ type MessageBatchResult as MessageBatchResult,
+ type MessageBatchSucceededResult as MessageBatchSucceededResult,
+ MessageBatchesPage as MessageBatchesPage,
+ type BatchCreateParams as BatchCreateParams,
+ type BatchListParams as BatchListParams,
+ };
+}
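Taken together, the new `Batches` resource supports a create, poll, stream-results workflow. Below is a minimal sketch of that flow; the client setup (API key from the environment), the model choice, and the 60-second polling interval are illustrative assumptions, and async iteration over `results()` follows the SDK's documented `JSONLDecoder` usage:

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic(); // assumes ANTHROPIC_API_KEY is set in the environment

async function runBatch(): Promise<void> {
  // Each request is an ordinary non-streaming Messages create payload plus a custom_id.
  let batch = await client.messages.batches.create({
    requests: [
      {
        custom_id: 'my-custom-id-1',
        params: {
          model: 'claude-3-5-sonnet-20241022',
          max_tokens: 1024,
          messages: [{ role: 'user', content: 'Hello, world' }],
        },
      },
    ],
  });

  // retrieve() is idempotent, so it can be polled until processing ends.
  while (batch.processing_status !== 'ended') {
    await new Promise((resolve) => setTimeout(resolve, 60_000)); // illustrative interval
    batch = await client.messages.batches.retrieve(batch.id);
  }

  // results() streams the .jsonl file; entries may arrive in any order,
  // so match them back to requests via custom_id.
  for await (const entry of await client.messages.batches.results(batch.id)) {
    if (entry.result.type === 'succeeded') {
      console.log(entry.custom_id, entry.result.message.content);
    } else {
      console.log(entry.custom_id, entry.result.type);
    }
  }
}

runBatch();
```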
diff --git a/src/resources/messages/index.ts b/src/resources/messages/index.ts
new file mode 100644
index 00000000..10308d2a
--- /dev/null
+++ b/src/resources/messages/index.ts
@@ -0,0 +1,63 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export {
+ MessageBatchesPage,
+ Batches,
+ type MessageBatch,
+ type MessageBatchCanceledResult,
+ type MessageBatchErroredResult,
+ type MessageBatchExpiredResult,
+ type MessageBatchIndividualResponse,
+ type MessageBatchRequestCounts,
+ type MessageBatchResult,
+ type MessageBatchSucceededResult,
+ type BatchCreateParams,
+ type BatchListParams,
+} from './batches';
+export {
+ Messages,
+ type Base64PDFSource,
+ type CacheControlEphemeral,
+ type ContentBlock,
+ type ContentBlockDeltaEvent,
+ type ContentBlockParam,
+ type ContentBlockStartEvent,
+ type ContentBlockStopEvent,
+ type DocumentBlockParam,
+ type ImageBlockParam,
+ type InputJSONDelta,
+ type Message,
+ type MessageDeltaEvent,
+ type MessageDeltaUsage,
+ type MessageParam,
+ type MessageStartEvent,
+ type MessageStopEvent,
+ type MessageStreamEvent,
+ type MessageTokensCount,
+ type Metadata,
+ type Model,
+ type RawContentBlockDeltaEvent,
+ type RawContentBlockStartEvent,
+ type RawContentBlockStopEvent,
+ type RawMessageDeltaEvent,
+ type RawMessageStartEvent,
+ type RawMessageStopEvent,
+ type RawMessageStreamEvent,
+ type TextBlock,
+ type TextBlockParam,
+ type TextDelta,
+ type Tool,
+ type ToolChoice,
+ type ToolChoiceAny,
+ type ToolChoiceAuto,
+ type ToolChoiceTool,
+ type ToolResultBlockParam,
+ type ToolUseBlock,
+ type ToolUseBlockParam,
+ type Usage,
+ type MessageCreateParams,
+ type MessageCreateParamsBase,
+ type MessageCreateParamsNonStreaming,
+ type MessageCreateParamsStreaming,
+ type MessageCountTokensParams,
+} from './messages';
diff --git a/src/resources/messages.ts b/src/resources/messages/messages.ts
similarity index 71%
rename from src/resources/messages.ts
rename to src/resources/messages/messages.ts
index e8e633c8..a1affbf5 100644
--- a/src/resources/messages.ts
+++ b/src/resources/messages/messages.ts
@@ -1,15 +1,32 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-import { APIResource } from '../resource';
-import { APIPromise } from '../core';
-import * as Core from '../core';
+import { APIResource } from '../../resource';
+import { APIPromise } from '../../core';
+import * as Core from '../../core';
import * as MessagesAPI from './messages';
-import { Stream } from '../streaming';
-import { MessageStream } from '../lib/MessageStream';
-
-export { MessageStream } from '../lib/MessageStream';
+import * as BatchesAPI from './batches';
+import {
+ BatchCreateParams,
+ BatchListParams,
+ Batches,
+ MessageBatch,
+ MessageBatchCanceledResult,
+ MessageBatchErroredResult,
+ MessageBatchExpiredResult,
+ MessageBatchIndividualResponse,
+ MessageBatchRequestCounts,
+ MessageBatchResult,
+ MessageBatchSucceededResult,
+ MessageBatchesPage,
+} from './batches';
+import { Stream } from '../../streaming';
+import { MessageStream } from '../../lib/MessageStream';
+
+export { MessageStream } from '../../lib/MessageStream';
export class Messages extends APIResource {
+ batches: BatchesAPI.Batches = new BatchesAPI.Batches(this._client);
+
/**
* Send a structured list of input messages with text and/or image content, and the
* model will generate the next message in the conversation.
@@ -51,22 +68,62 @@ export class Messages extends APIResource {
stream(body: MessageStreamParams, options?: Core.RequestOptions): MessageStream {
return MessageStream.createMessage(this, body, options);
}
+
+ /**
+ * Count the number of tokens in a Message.
+ *
+ * The Token Count API can be used to count the number of tokens in a Message,
+ * including tools, images, and documents, without creating it.
+ */
+ countTokens(
+ body: MessageCountTokensParams,
+ options?: Core.RequestOptions,
+ ): Core.APIPromise<MessageTokensCount> {
+ return this._client.post('/v1/messages/count_tokens', { body, ...options });
+ }
+}
+
+export interface Base64PDFSource {
+ data: string;
+
+ media_type: 'application/pdf';
+
+ type: 'base64';
+}
+
+export interface CacheControlEphemeral {
+ type: 'ephemeral';
}
export type ContentBlock = TextBlock | ToolUseBlock;
export type ContentBlockDeltaEvent = RawContentBlockDeltaEvent;
-export type ContentBlockParam = TextBlockParam | ImageBlockParam | ToolUseBlockParam | ToolResultBlockParam;
+export type ContentBlockParam =
+ | TextBlockParam
+ | ImageBlockParam
+ | ToolUseBlockParam
+ | ToolResultBlockParam
+ | DocumentBlockParam;
export type ContentBlockStartEvent = RawContentBlockStartEvent;
export type ContentBlockStopEvent = RawContentBlockStopEvent;
+export interface DocumentBlockParam {
+ source: Base64PDFSource;
+
+ type: 'document';
+
+ cache_control?: CacheControlEphemeral | null;
+}
+
export interface ImageBlockParam {
source: ImageBlockParam.Source;
type: 'image';
+
+ cache_control?: CacheControlEphemeral | null;
}
export namespace ImageBlockParam {
@@ -213,6 +270,14 @@ export type MessageStopEvent = RawMessageStopEvent;
export type MessageStreamEvent = RawMessageStreamEvent;
+export interface MessageTokensCount {
+ /**
+ * The total number of tokens across the provided list of messages, system prompt,
+ * and tools.
+ */
+ input_tokens: number;
+}
+
export interface Metadata {
/**
* An external identifier for the user who is associated with the request.
@@ -335,6 +400,8 @@ export interface TextBlockParam {
text: string;
type: 'text';
+
+ cache_control?: CacheControlEphemeral | null;
}
export interface TextDelta {
@@ -359,6 +426,8 @@ export interface Tool {
*/
name: string;
+ cache_control?: CacheControlEphemeral | null;
+
/**
* Description of what this tool does.
*
@@ -446,6 +515,8 @@ export interface ToolResultBlockParam {
type: 'tool_result';
+ cache_control?: CacheControlEphemeral | null;
+
content?: string | Array<TextBlockParam | ImageBlockParam>;
is_error?: boolean;
@@ -469,9 +540,21 @@ export interface ToolUseBlockParam {
name: string;
type: 'tool_use';
+
+ cache_control?: CacheControlEphemeral | null;
}
export interface Usage {
+ /**
+ * The number of input tokens used to create the cache entry.
+ */
+ cache_creation_input_tokens: number | null;
+
+ /**
+ * The number of input tokens read from the cache.
+ */
+ cache_read_input_tokens: number | null;
+
/**
* The number of input tokens which were used.
*/
@@ -791,13 +874,205 @@ export interface MessageCreateParamsStreaming extends MessageCreateParamsBase {
export type MessageStreamParams = MessageCreateParamsBase;
+export interface MessageCountTokensParams {
+ /**
+ * Input messages.
+ *
+ * Our models are trained to operate on alternating `user` and `assistant`
+ * conversational turns. When creating a new `Message`, you specify the prior
+ * conversational turns with the `messages` parameter, and the model then generates
+ * the next `Message` in the conversation. Consecutive `user` or `assistant` turns
+ * in your request will be combined into a single turn.
+ *
+ * Each input message must be an object with a `role` and `content`. You can
+ * specify a single `user`-role message, or you can include multiple `user` and
+ * `assistant` messages.
+ *
+ * If the final message uses the `assistant` role, the response content will
+ * continue immediately from the content in that message. This can be used to
+ * constrain part of the model's response.
+ *
+ * Example with a single `user` message:
+ *
+ * ```json
+ * [{ "role": "user", "content": "Hello, Claude" }]
+ * ```
+ *
+ * Example with multiple conversational turns:
+ *
+ * ```json
+ * [
+ * { "role": "user", "content": "Hello there." },
+ * { "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
+ * { "role": "user", "content": "Can you explain LLMs in plain English?" }
+ * ]
+ * ```
+ *
+ * Example with a partially-filled response from Claude:
+ *
+ * ```json
+ * [
+ * {
+ * "role": "user",
+ * "content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
+ * },
+ * { "role": "assistant", "content": "The best answer is (" }
+ * ]
+ * ```
+ *
+ * Each input message `content` may be either a single `string` or an array of
+ * content blocks, where each block has a specific `type`. Using a `string` for
+ * `content` is shorthand for an array of one content block of type `"text"`. The
+ * following input messages are equivalent:
+ *
+ * ```json
+ * { "role": "user", "content": "Hello, Claude" }
+ * ```
+ *
+ * ```json
+ * { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
+ * ```
+ *
+ * Starting with Claude 3 models, you can also send image content blocks:
+ *
+ * ```json
+ * {
+ * "role": "user",
+ * "content": [
+ * {
+ * "type": "image",
+ * "source": {
+ * "type": "base64",
+ * "media_type": "image/jpeg",
+ * "data": "/9j/4AAQSkZJRg..."
+ * }
+ * },
+ * { "type": "text", "text": "What is in this image?" }
+ * ]
+ * }
+ * ```
+ *
+ * We currently support the `base64` source type for images, and the `image/jpeg`,
+ * `image/png`, `image/gif`, and `image/webp` media types.
+ *
+ * See [examples](https://docs.anthropic.com/en/api/messages-examples#vision) for
+ * more input examples.
+ *
+ * Note that if you want to include a
+ * [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
+ * the top-level `system` parameter — there is no `"system"` role for input
+ * messages in the Messages API.
+ */
+ messages: Array<MessageParam>;
+
+ /**
+ * The model that will complete your prompt.\n\nSee
+ * [models](https://docs.anthropic.com/en/docs/models-overview) for additional
+ * details and options.
+ */
+ model: Model;
+
+ /**
+ * System prompt.
+ *
+ * A system prompt is a way of providing context and instructions to Claude, such
+ * as specifying a particular goal or role. See our
+ * [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ */
+ system?: string | Array<TextBlockParam>;
+
+ /**
+ * How the model should use the provided tools. The model can use a specific tool,
+ * any available tool, or decide by itself.
+ */
+ tool_choice?: ToolChoice;
+
+ /**
+ * Definitions of tools that the model may use.
+ *
+ * If you include `tools` in your API request, the model may return `tool_use`
+ * content blocks that represent the model's use of those tools. You can then run
+ * those tools using the tool input generated by the model and then optionally
+ * return results back to the model using `tool_result` content blocks.
+ *
+ * Each tool definition includes:
+ *
+ * - `name`: Name of the tool.
+ * - `description`: Optional, but strongly-recommended description of the tool.
+ * - `input_schema`: [JSON schema](https://json-schema.org/) for the tool `input`
+ * shape that the model will produce in `tool_use` output content blocks.
+ *
+ * For example, if you defined `tools` as:
+ *
+ * ```json
+ * [
+ * {
+ * "name": "get_stock_price",
+ * "description": "Get the current stock price for a given ticker symbol.",
+ * "input_schema": {
+ * "type": "object",
+ * "properties": {
+ * "ticker": {
+ * "type": "string",
+ * "description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
+ * }
+ * },
+ * "required": ["ticker"]
+ * }
+ * }
+ * ]
+ * ```
+ *
+ * And then asked the model "What's the S&P 500 at today?", the model might produce
+ * `tool_use` content blocks in the response like this:
+ *
+ * ```json
+ * [
+ * {
+ * "type": "tool_use",
+ * "id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ * "name": "get_stock_price",
+ * "input": { "ticker": "^GSPC" }
+ * }
+ * ]
+ * ```
+ *
+ * You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
+ * input, and return the following back to the model in a subsequent `user`
+ * message:
+ *
+ * ```json
+ * [
+ * {
+ * "type": "tool_result",
+ * "tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
+ * "content": "259.75 USD"
+ * }
+ * ]
+ * ```
+ *
+ * Tools can be used for workflows that include running client-side tools and
+ * functions, or more generally whenever you want the model to produce a particular
+ * JSON structure of output.
+ *
+ * See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ */
+ tools?: Array<Tool>;
+}
+
+Messages.Batches = Batches;
+Messages.MessageBatchesPage = MessageBatchesPage;
+
export declare namespace Messages {
export {
+ type Base64PDFSource as Base64PDFSource,
+ type CacheControlEphemeral as CacheControlEphemeral,
type ContentBlock as ContentBlock,
type ContentBlockDeltaEvent as ContentBlockDeltaEvent,
type ContentBlockParam as ContentBlockParam,
type ContentBlockStartEvent as ContentBlockStartEvent,
type ContentBlockStopEvent as ContentBlockStopEvent,
+ type DocumentBlockParam as DocumentBlockParam,
type ImageBlockParam as ImageBlockParam,
type InputJsonDelta as InputJsonDelta,
type InputJSONDelta as InputJSONDelta,
@@ -808,6 +1083,7 @@ export declare namespace Messages {
type MessageStartEvent as MessageStartEvent,
type MessageStopEvent as MessageStopEvent,
type MessageStreamEvent as MessageStreamEvent,
+ type MessageTokensCount as MessageTokensCount,
type Metadata as Metadata,
type Model as Model,
type RawContentBlockDeltaEvent as RawContentBlockDeltaEvent,
@@ -833,5 +1109,21 @@ export declare namespace Messages {
type MessageCreateParamsNonStreaming as MessageCreateParamsNonStreaming,
type MessageCreateParamsStreaming as MessageCreateParamsStreaming,
type MessageStreamParams as MessageStreamParams,
+ type MessageCountTokensParams as MessageCountTokensParams,
+ };
+
+ export {
+ Batches as Batches,
+ type MessageBatch as MessageBatch,
+ type MessageBatchCanceledResult as MessageBatchCanceledResult,
+ type MessageBatchErroredResult as MessageBatchErroredResult,
+ type MessageBatchExpiredResult as MessageBatchExpiredResult,
+ type MessageBatchIndividualResponse as MessageBatchIndividualResponse,
+ type MessageBatchRequestCounts as MessageBatchRequestCounts,
+ type MessageBatchResult as MessageBatchResult,
+ type MessageBatchSucceededResult as MessageBatchSucceededResult,
+ MessageBatchesPage as MessageBatchesPage,
+ type BatchCreateParams as BatchCreateParams,
+ type BatchListParams as BatchListParams,
};
}
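The new `countTokens` method accepts the same prompt-shaping parameters as `create` (`messages`, `model`, `system`, `tools`, `tool_choice`) and returns only a `MessageTokensCount`. A short sketch, mirroring the parameters exercised by the updated tests later in this diff; the client setup is assumed and the prompt is illustrative:

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

async function main(): Promise<void> {
  // Counts tokens across messages, system prompt, and tools without creating a Message.
  // The system block also demonstrates the new cache_control field.
  const count = await client.messages.countTokens({
    model: 'claude-3-5-sonnet-20241022',
    system: [{ type: 'text', text: "Today's date is 2024-06-01.", cache_control: { type: 'ephemeral' } }],
    messages: [{ role: 'user', content: 'Hello, Claude' }],
  });
  console.log(count.input_tokens);
}

main();
```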
diff --git a/src/resources/models.ts b/src/resources/models.ts
new file mode 100644
index 00000000..50e80399
--- /dev/null
+++ b/src/resources/models.ts
@@ -0,0 +1,75 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import { APIResource } from '../resource';
+import { isRequestOptions } from '../core';
+import * as Core from '../core';
+import { Page, type PageParams } from '../pagination';
+
+export class Models extends APIResource {
+ /**
+ * Get a specific model.
+ *
+ * The Models API response can be used to determine information about a specific
+ * model or resolve a model alias to a model ID.
+ */
+ retrieve(modelId: string, options?: Core.RequestOptions): Core.APIPromise<ModelInfo> {
+ return this._client.get(`/v1/models/${modelId}`, options);
+ }
+
+ /**
+ * List available models.
+ *
+ * The Models API response can be used to determine which models are available for
+ * use in the API. More recently released models are listed first.
+ */
+ list(query?: ModelListParams, options?: Core.RequestOptions): Core.PagePromise<ModelInfosPage, ModelInfo>;
+ list(options?: Core.RequestOptions): Core.PagePromise<ModelInfosPage, ModelInfo>;
+ list(
+ query: ModelListParams | Core.RequestOptions = {},
+ options?: Core.RequestOptions,
+ ): Core.PagePromise<ModelInfosPage, ModelInfo> {
+ if (isRequestOptions(query)) {
+ return this.list({}, query);
+ }
+ return this._client.getAPIList('/v1/models', ModelInfosPage, { query, ...options });
+ }
+}
+
+export class ModelInfosPage extends Page<ModelInfo> {}
+
+export interface ModelInfo {
+ /**
+ * Unique model identifier.
+ */
+ id: string;
+
+ /**
+ * RFC 3339 datetime string representing the time at which the model was released.
+ * May be set to an epoch value if the release date is unknown.
+ */
+ created_at: string;
+
+ /**
+ * A human-readable name for the model.
+ */
+ display_name: string;
+
+ /**
+ * Object type.
+ *
+ * For Models, this is always `"model"`.
+ */
+ type: 'model';
+}
+
+export interface ModelListParams extends PageParams {}
+
+Models.ModelInfosPage = ModelInfosPage;
+
+export declare namespace Models {
+ export {
+ type ModelInfo as ModelInfo,
+ ModelInfosPage as ModelInfosPage,
+ type ModelListParams as ModelListParams,
+ };
+}
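The `Models` resource pairs a paginated `list` (most recent first) with `retrieve`, which per the doc comment can also resolve a model alias to a concrete model ID. A minimal sketch, assuming the SDK's standard auto-pagination over `PagePromise`; the alias shown is illustrative:

```ts
import Anthropic from '@anthropic-ai/sdk';

const client = new Anthropic();

async function main(): Promise<void> {
  // Auto-paginates through all available models.
  for await (const model of client.models.list()) {
    console.log(model.id, model.display_name);
  }

  // Resolving an alias (illustrative) returns the ModelInfo for the concrete model ID.
  const info = await client.models.retrieve('claude-3-5-sonnet-latest');
  console.log(info.id, info.created_at);
}

main();
```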
diff --git a/src/resources/shared.ts b/src/resources/shared.ts
new file mode 100644
index 00000000..d731c1f9
--- /dev/null
+++ b/src/resources/shared.ts
@@ -0,0 +1,72 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+export interface APIErrorObject {
+ message: string;
+
+ type: 'api_error';
+}
+
+export interface AuthenticationError {
+ message: string;
+
+ type: 'authentication_error';
+}
+
+export interface BillingError {
+ message: string;
+
+ type: 'billing_error';
+}
+
+export type ErrorObject =
+ | InvalidRequestError
+ | AuthenticationError
+ | BillingError
+ | PermissionError
+ | NotFoundError
+ | RateLimitError
+ | GatewayTimeoutError
+ | APIErrorObject
+ | OverloadedError;
+
+export interface ErrorResponse {
+ error: ErrorObject;
+
+ type: 'error';
+}
+
+export interface GatewayTimeoutError {
+ message: string;
+
+ type: 'timeout_error';
+}
+
+export interface InvalidRequestError {
+ message: string;
+
+ type: 'invalid_request_error';
+}
+
+export interface NotFoundError {
+ message: string;
+
+ type: 'not_found_error';
+}
+
+export interface OverloadedError {
+ message: string;
+
+ type: 'overloaded_error';
+}
+
+export interface PermissionError {
+ message: string;
+
+ type: 'permission_error';
+}
+
+export interface RateLimitError {
+ message: string;
+
+ type: 'rate_limit_error';
+}
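Every member of the shared `ErrorObject` union carries a literal `type` tag, so callers can narrow it with an ordinary discriminated-union switch. A small sketch; the helper name is hypothetical, and the import path assumes the published package mirrors the `src/resources/` layout:

```ts
import type { ErrorResponse } from '@anthropic-ai/sdk/resources/shared';

// Hypothetical helper: turns an ErrorResponse into a log line,
// narrowing the ErrorObject union on its literal `type` tag.
function describeError(response: ErrorResponse): string {
  switch (response.error.type) {
    case 'rate_limit_error':
      return `Rate limited: ${response.error.message}`;
    case 'timeout_error': // note: GatewayTimeoutError's tag is 'timeout_error'
      return `Gateway timeout: ${response.error.message}`;
    default:
      return `${response.error.type}: ${response.error.message}`;
  }
}
```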
diff --git a/tests/api-resources/MessageStream.test.ts b/tests/api-resources/MessageStream.test.ts
index 81b9c81e..0051d397 100644
--- a/tests/api-resources/MessageStream.test.ts
+++ b/tests/api-resources/MessageStream.test.ts
@@ -149,7 +149,12 @@ describe('MessageStream class', () => {
model: 'claude-3-opus-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
- usage: { output_tokens: 6, input_tokens: 10 },
+ usage: {
+ output_tokens: 6,
+ input_tokens: 10,
+ cache_creation_input_tokens: null,
+ cache_read_input_tokens: null,
+ },
}),
);
@@ -209,22 +214,22 @@ describe('MessageStream class', () => {
},
{
"args": [
- "{"type":"message_start","message":{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message_start","message":{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
{
"args": [
"{"type":"content_block_start","content_block":{"type":"text","text":""},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":""}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":""}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
{
"args": [
"{"type":"content_block_delta","delta":{"type":"text_delta","text":"Hello"},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
@@ -238,7 +243,7 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"content_block_delta","delta":{"type":"text_delta","text":" ther"},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello ther"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello ther"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
@@ -252,7 +257,7 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"content_block_delta","delta":{"type":"text_delta","text":"e!"},"index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
@@ -266,7 +271,7 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"content_block_stop","index":0}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":null,"stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
@@ -279,26 +284,26 @@ describe('MessageStream class', () => {
{
"args": [
"{"type":"message_delta","usage":{"output_tokens":6},"delta":{"stop_reason":"end_turn","stop_sequence":null}}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
{
"args": [
"{"type":"message_stop"}",
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "streamEvent",
},
{
"args": [
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "message",
},
{
"args": [
- "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10}}",
+ "{"type":"message","id":"msg_01hhptzfxdaeehfxfv070yb6b8","role":"assistant","content":[{"type":"text","text":"Hello there!"}],"model":"claude-3-opus-20240229","stop_reason":"end_turn","stop_sequence":null,"usage":{"output_tokens":6,"input_tokens":10,"cache_creation_input_tokens":null,"cache_read_input_tokens":null}}",
],
"type": "finalMessage",
},
@@ -326,6 +331,8 @@ describe('MessageStream class', () => {
"stop_sequence": null,
"type": "message",
"usage": {
+ "cache_creation_input_tokens": null,
+ "cache_read_input_tokens": null,
"input_tokens": 10,
"output_tokens": 6,
},
@@ -353,7 +360,12 @@ describe('MessageStream class', () => {
model: 'claude-3-opus-20240229',
stop_reason: 'end_turn',
stop_sequence: null,
- usage: { output_tokens: 6, input_tokens: 10 },
+ usage: {
+ output_tokens: 6,
+ input_tokens: 10,
+ cache_creation_input_tokens: null,
+ cache_read_input_tokens: null,
+ },
}),
);
diff --git a/tests/api-resources/beta/models.test.ts b/tests/api-resources/beta/models.test.ts
new file mode 100644
index 00000000..f155b632
--- /dev/null
+++ b/tests/api-resources/beta/models.test.ts
@@ -0,0 +1,57 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Anthropic from '@anthropic-ai/sdk';
+import { Response } from 'node-fetch';
+
+const client = new Anthropic({
+ apiKey: 'my-anthropic-api-key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource models', () => {
+ test('retrieve', async () => {
+ const responsePromise = client.beta.models.retrieve('model_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(
+ client.beta.models.retrieve('model_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(Anthropic.NotFoundError);
+ });
+
+ test('list', async () => {
+ const responsePromise = client.beta.models.list();
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(client.beta.models.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ Anthropic.NotFoundError,
+ );
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(
+ client.beta.models.list(
+ { after_id: 'after_id', before_id: 'before_id', limit: 1 },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(Anthropic.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/beta/prompt-caching/messages.test.ts b/tests/api-resources/beta/prompt-caching/messages.test.ts
deleted file mode 100644
index 39ecdfab..00000000
--- a/tests/api-resources/beta/prompt-caching/messages.test.ts
+++ /dev/null
@@ -1,57 +0,0 @@
-// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-import Anthropic from '@anthropic-ai/sdk';
-import { Response } from 'node-fetch';
-
-const client = new Anthropic({
- apiKey: 'my-anthropic-api-key',
- baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
-});
-
-describe('resource messages', () => {
- test('create: only required params', async () => {
- const responsePromise = client.beta.promptCaching.messages.create({
- max_tokens: 1024,
- messages: [{ content: 'Hello, world', role: 'user' }],
- model: 'claude-3-5-sonnet-20241022',
- });
- const rawResponse = await responsePromise.asResponse();
- expect(rawResponse).toBeInstanceOf(Response);
- const response = await responsePromise;
- expect(response).not.toBeInstanceOf(Response);
- const dataAndResponse = await responsePromise.withResponse();
- expect(dataAndResponse.data).toBe(response);
- expect(dataAndResponse.response).toBe(rawResponse);
- });
-
- test('create: required and optional params', async () => {
- const response = await client.beta.promptCaching.messages.create({
- max_tokens: 1024,
- messages: [{ content: 'Hello, world', role: 'user' }],
- model: 'claude-3-5-sonnet-20241022',
- metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
- stop_sequences: ['string'],
- stream: false,
- system: [{ text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } }],
- temperature: 1,
- tool_choice: { type: 'auto', disable_parallel_tool_use: true },
- tools: [
- {
- input_schema: {
- type: 'object',
- properties: {
- location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
- unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
- },
- },
- name: 'x',
- cache_control: { type: 'ephemeral' },
- description: 'Get the current weather in a given location',
- },
- ],
- top_k: 5,
- top_p: 0.7,
- betas: ['string'],
- });
- });
-});
diff --git a/tests/api-resources/messages/batches.test.ts b/tests/api-resources/messages/batches.test.ts
new file mode 100644
index 00000000..26efdbc8
--- /dev/null
+++ b/tests/api-resources/messages/batches.test.ts
@@ -0,0 +1,145 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Anthropic from '@anthropic-ai/sdk';
+import { Response } from 'node-fetch';
+
+const client = new Anthropic({
+ apiKey: 'my-anthropic-api-key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource batches', () => {
+ test('create: only required params', async () => {
+ const responsePromise = client.messages.batches.create({
+ requests: [
+ {
+ custom_id: 'my-custom-id-1',
+ params: {
+ max_tokens: 1024,
+ messages: [{ content: 'Hello, world', role: 'user' }],
+ model: 'claude-3-5-sonnet-20241022',
+ },
+ },
+ ],
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('create: required and optional params', async () => {
+ const response = await client.messages.batches.create({
+ requests: [
+ {
+ custom_id: 'my-custom-id-1',
+ params: {
+ max_tokens: 1024,
+ messages: [{ content: 'Hello, world', role: 'user' }],
+ model: 'claude-3-5-sonnet-20241022',
+ metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
+ stop_sequences: ['string'],
+ system: [
+ { text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } },
+ ],
+ temperature: 1,
+ tool_choice: { type: 'auto', disable_parallel_tool_use: true },
+ tools: [
+ {
+ input_schema: {
+ type: 'object',
+ properties: {
+ location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+ unit: {
+ description: 'Unit for the output - one of (celsius, fahrenheit)',
+ type: 'string',
+ },
+ },
+ },
+ name: 'x',
+ cache_control: { type: 'ephemeral' },
+ description: 'Get the current weather in a given location',
+ },
+ ],
+ top_k: 5,
+ top_p: 0.7,
+ },
+ },
+ ],
+ });
+ });
+
+ test('retrieve', async () => {
+ const responsePromise = client.messages.batches.retrieve('message_batch_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(
+ client.messages.batches.retrieve('message_batch_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(Anthropic.NotFoundError);
+ });
+
+ test('list', async () => {
+ const responsePromise = client.messages.batches.list();
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(client.messages.batches.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ Anthropic.NotFoundError,
+ );
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(
+ client.messages.batches.list(
+ { after_id: 'after_id', before_id: 'before_id', limit: 1 },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(Anthropic.NotFoundError);
+ });
+
+ test('cancel', async () => {
+ const responsePromise = client.messages.batches.cancel('message_batch_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('cancel: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(
+ client.messages.batches.cancel('message_batch_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(Anthropic.NotFoundError);
+ });
+
+ test('results: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(
+ client.messages.batches.results('message_batch_id', { path: '/_stainless_unknown_path' }),
+ ).rejects.toThrow(Anthropic.NotFoundError);
+ });
+});
diff --git a/tests/api-resources/messages.test.ts b/tests/api-resources/messages/messages.test.ts
similarity index 65%
rename from tests/api-resources/messages.test.ts
rename to tests/api-resources/messages/messages.test.ts
index 17ae6e52..3ae41d32 100644
--- a/tests/api-resources/messages.test.ts
+++ b/tests/api-resources/messages/messages.test.ts
@@ -32,7 +32,7 @@ describe('resource messages', () => {
metadata: { user_id: '13803d75-b4b5-4c3e-b2a2-6f21399b021b' },
stop_sequences: ['string'],
stream: false,
- system: [{ text: "Today's date is 2024-06-01.", type: 'text' }],
+ system: [{ text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } }],
temperature: 1,
tool_choice: { type: 'auto', disable_parallel_tool_use: true },
tools: [
@@ -45,6 +45,7 @@ describe('resource messages', () => {
},
},
name: 'x',
+ cache_control: { type: 'ephemeral' },
description: 'Get the current weather in a given location',
},
],
@@ -52,6 +53,43 @@ describe('resource messages', () => {
top_p: 0.7,
});
});
+
+ test('countTokens: only required params', async () => {
+ const responsePromise = client.messages.countTokens({
+ messages: [{ content: 'string', role: 'user' }],
+ model: 'string',
+ });
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('countTokens: required and optional params', async () => {
+ const response = await client.messages.countTokens({
+ messages: [{ content: 'string', role: 'user' }],
+ model: 'string',
+ system: [{ text: "Today's date is 2024-06-01.", type: 'text', cache_control: { type: 'ephemeral' } }],
+ tool_choice: { type: 'auto', disable_parallel_tool_use: true },
+ tools: [
+ {
+ input_schema: {
+ type: 'object',
+ properties: {
+ location: { description: 'The city and state, e.g. San Francisco, CA', type: 'string' },
+ unit: { description: 'Unit for the output - one of (celsius, fahrenheit)', type: 'string' },
+ },
+ },
+ name: 'x',
+ cache_control: { type: 'ephemeral' },
+ description: 'Get the current weather in a given location',
+ },
+ ],
+ });
+ });
});
test('create: warns when using a deprecated model', async () => {
diff --git a/tests/api-resources/models.test.ts b/tests/api-resources/models.test.ts
new file mode 100644
index 00000000..7f5c0411
--- /dev/null
+++ b/tests/api-resources/models.test.ts
@@ -0,0 +1,57 @@
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import Anthropic from '@anthropic-ai/sdk';
+import { Response } from 'node-fetch';
+
+const client = new Anthropic({
+ apiKey: 'my-anthropic-api-key',
+ baseURL: process.env['TEST_API_BASE_URL'] ?? 'http://127.0.0.1:4010',
+});
+
+describe('resource models', () => {
+ test('retrieve', async () => {
+ const responsePromise = client.models.retrieve('model_id');
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('retrieve: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(client.models.retrieve('model_id', { path: '/_stainless_unknown_path' })).rejects.toThrow(
+ Anthropic.NotFoundError,
+ );
+ });
+
+ test('list', async () => {
+ const responsePromise = client.models.list();
+ const rawResponse = await responsePromise.asResponse();
+ expect(rawResponse).toBeInstanceOf(Response);
+ const response = await responsePromise;
+ expect(response).not.toBeInstanceOf(Response);
+ const dataAndResponse = await responsePromise.withResponse();
+ expect(dataAndResponse.data).toBe(response);
+ expect(dataAndResponse.response).toBe(rawResponse);
+ });
+
+ test('list: request options instead of params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(client.models.list({ path: '/_stainless_unknown_path' })).rejects.toThrow(
+ Anthropic.NotFoundError,
+ );
+ });
+
+ test('list: request options and params are passed correctly', async () => {
+ // ensure the request options are being passed correctly by passing an invalid path in order to cause an error
+ await expect(
+ client.models.list(
+ { after_id: 'after_id', before_id: 'before_id', limit: 1 },
+ { path: '/_stainless_unknown_path' },
+ ),
+ ).rejects.toThrow(Anthropic.NotFoundError);
+ });
+});