diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ffcd85673c..db912a0d0f 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.95.1" + ".": "1.96.0" } \ No newline at end of file diff --git a/.stats.yml b/.stats.yml index b82cec4eb6..12a179baf6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-de3e91790d0b9f3ce26d679ac07079880ccc695bd8c878f961c4d577a5025a2e.yml -openapi_spec_hash: 4b44e3f287583d01fbe7b10cd943254a -config_hash: 06b9a88561844d60d8efa4eaabf5fa3c +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-82fd6fcb3eea81cbbe09a6f831c82219f1251e1b76474b4c41f424bf277e6a71.yml +openapi_spec_hash: c8d54bd1ae3d704f6b6f72ffd2f876d8 +config_hash: 3315d58b60faf63b1bee251b81837cda diff --git a/CHANGELOG.md b/CHANGELOG.md index 14d61de1bf..c91c4c4b35 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 1.96.0 (2025-07-15) + +Full Changelog: [v1.95.1...v1.96.0](https://github.com/openai/openai-python/compare/v1.95.1...v1.96.0) + +### Features + +* clean up environment call outs ([87c2e97](https://github.com/openai/openai-python/commit/87c2e979e0ec37347b7f595c2696408acd25fe20)) + + +### Chores + +* **api:** update realtime specs, build config ([bf06d88](https://github.com/openai/openai-python/commit/bf06d88b33f9af82a51d9a8af5b7a38925906f7a)) + ## 1.95.1 (2025-07-11) Full Changelog: [v1.95.0...v1.95.1](https://github.com/openai/openai-python/compare/v1.95.0...v1.95.1) diff --git a/README.md b/README.md index d09de14f3c..d4b8d8d170 100644 --- a/README.md +++ b/README.md @@ -160,7 +160,6 @@ pip install openai[aiohttp] Then you can enable it by instantiating the client with `http_client=DefaultAioHttpClient()`: ```python -import os import asyncio from openai import DefaultAioHttpClient from openai import AsyncOpenAI @@ -168,7 +167,7 @@ from openai import AsyncOpenAI async def main() -> None: async with AsyncOpenAI( - api_key=os.environ.get("OPENAI_API_KEY"), # This is the default and can be omitted + api_key="My API Key", http_client=DefaultAioHttpClient(), ) as client: chat_completion = await client.chat.completions.create( diff --git a/pyproject.toml b/pyproject.toml index d9305c5469..65055d926a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.95.1" +version = "1.96.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 6e2b83bbaa..b1025f4a31 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.95.1" # x-release-please-version +__version__ = "1.96.0" # x-release-please-version diff --git a/src/openai/types/beta/realtime/conversation_item.py b/src/openai/types/beta/realtime/conversation_item.py index 4edf6c4d5f..21b7a8ac1f 100644 --- a/src/openai/types/beta/realtime/conversation_item.py +++ b/src/openai/types/beta/realtime/conversation_item.py @@ -50,8 +50,8 @@ class ConversationItem(BaseModel): for `message` items. """ - status: Optional[Literal["completed", "incomplete"]] = None - """The status of the item (`completed`, `incomplete`). 
+ status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/src/openai/types/beta/realtime/conversation_item_created_event.py b/src/openai/types/beta/realtime/conversation_item_created_event.py index 2f20388246..aea7ad5b4b 100644 --- a/src/openai/types/beta/realtime/conversation_item_created_event.py +++ b/src/openai/types/beta/realtime/conversation_item_created_event.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from typing_extensions import Literal from ...._models import BaseModel @@ -15,11 +16,12 @@ class ConversationItemCreatedEvent(BaseModel): item: ConversationItem """The item to add to the conversation.""" - previous_item_id: str + type: Literal["conversation.item.created"] + """The event type, must be `conversation.item.created`.""" + + previous_item_id: Optional[str] = None """ The ID of the preceding item in the Conversation context, allows the client to - understand the order of the conversation. + understand the order of the conversation. Can be `null` if the item has no + predecessor. """ - - type: Literal["conversation.item.created"] - """The event type, must be `conversation.item.created`.""" diff --git a/src/openai/types/beta/realtime/conversation_item_param.py b/src/openai/types/beta/realtime/conversation_item_param.py index ac0f8431e5..8bbd539c0c 100644 --- a/src/openai/types/beta/realtime/conversation_item_param.py +++ b/src/openai/types/beta/realtime/conversation_item_param.py @@ -51,8 +51,8 @@ class ConversationItemParam(TypedDict, total=False): for `message` items. """ - status: Literal["completed", "incomplete"] - """The status of the item (`completed`, `incomplete`). + status: Literal["completed", "incomplete", "in_progress"] + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference.py b/src/openai/types/beta/realtime/conversation_item_with_reference.py index 31806afc33..dec7a5a409 100644 --- a/src/openai/types/beta/realtime/conversation_item_with_reference.py +++ b/src/openai/types/beta/realtime/conversation_item_with_reference.py @@ -53,8 +53,8 @@ class ConversationItemWithReference(BaseModel): for `message` items. """ - status: Optional[Literal["completed", "incomplete"]] = None - """The status of the item (`completed`, `incomplete`). + status: Optional[Literal["completed", "incomplete", "in_progress"]] = None + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py index e266cdce32..3778373a4c 100644 --- a/src/openai/types/beta/realtime/conversation_item_with_reference_param.py +++ b/src/openai/types/beta/realtime/conversation_item_with_reference_param.py @@ -54,8 +54,8 @@ class ConversationItemWithReferenceParam(TypedDict, total=False): for `message` items. """ - status: Literal["completed", "incomplete"] - """The status of the item (`completed`, `incomplete`). 
+ status: Literal["completed", "incomplete", "in_progress"] + """The status of the item (`completed`, `incomplete`, `in_progress`). These have no effect on the conversation, but are accepted for consistency with the `conversation.item.created` event. diff --git a/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py b/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py index 3071eff357..22eb53b117 100644 --- a/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py +++ b/src/openai/types/beta/realtime/input_audio_buffer_committed_event.py @@ -1,5 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +from typing import Optional from typing_extensions import Literal from ...._models import BaseModel @@ -14,8 +15,11 @@ class InputAudioBufferCommittedEvent(BaseModel): item_id: str """The ID of the user message item that will be created.""" - previous_item_id: str - """The ID of the preceding item after which the new item will be inserted.""" - type: Literal["input_audio_buffer.committed"] """The event type, must be `input_audio_buffer.committed`.""" + + previous_item_id: Optional[str] = None + """ + The ID of the preceding item after which the new item will be inserted. Can be + `null` if the item has no predecessor. + """ diff --git a/src/openai/types/beta/realtime/realtime_response.py b/src/openai/types/beta/realtime/realtime_response.py index 8ecfb91c31..28e03c8717 100644 --- a/src/openai/types/beta/realtime/realtime_response.py +++ b/src/openai/types/beta/realtime/realtime_response.py @@ -60,10 +60,10 @@ class RealtimeResponse(BaseModel): output_audio_format: Optional[Literal["pcm16", "g711_ulaw", "g711_alaw"]] = None """The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.""" - status: Optional[Literal["completed", "cancelled", "failed", "incomplete"]] = None + status: Optional[Literal["completed", "cancelled", "failed", "incomplete", "in_progress"]] = None """ - The final status of the response (`completed`, `cancelled`, `failed`, or - `incomplete`). + The final status of the response (`completed`, `cancelled`, `failed`, + `incomplete`, or `in_progress`). """ status_details: Optional[RealtimeResponseStatus] = None diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py index 20a3765481..9674785701 100644 --- a/src/openai/types/eval_create_params.py +++ b/src/openai/types/eval_create_params.py @@ -25,6 +25,7 @@ "TestingCriterionLabelModelInputEvalItem", "TestingCriterionLabelModelInputEvalItemContent", "TestingCriterionLabelModelInputEvalItemContentOutputText", + "TestingCriterionLabelModelInputEvalItemContentInputImage", "TestingCriterionTextSimilarity", "TestingCriterionPython", "TestingCriterionScoreModel", @@ -109,14 +110,32 @@ class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total= """The type of the output text. Always `output_text`.""" +class TestingCriterionLabelModelInputEvalItemContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`.
+ """ + + TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[ - str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText + str, + ResponseInputTextParam, + TestingCriterionLabelModelInputEvalItemContentOutputText, + TestingCriterionLabelModelInputEvalItemContentInputImage, + Iterable[object], ] class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): content: Required[TestingCriterionLabelModelInputEvalItemContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index 0a942cd200..a0eaa5addb 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -26,6 +26,7 @@ "InputMessagesTemplateTemplateMessage", "InputMessagesTemplateTemplateMessageContent", "InputMessagesTemplateTemplateMessageContentOutputText", + "InputMessagesTemplateTemplateMessageContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -94,14 +95,32 @@ class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" +class InputMessagesTemplateTemplateMessageContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText + str, + ResponseInputText, + InputMessagesTemplateTemplateMessageContentOutputText, + InputMessagesTemplateTemplateMessageContentInputImage, + List[object], ] class InputMessagesTemplateTemplateMessage(BaseModel): content: InputMessagesTemplateTemplateMessageContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index 84344fcd94..8892b68b17 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -26,6 +26,7 @@ "InputMessagesTemplateTemplateMessage", "InputMessagesTemplateTemplateMessageContent", "InputMessagesTemplateTemplateMessageContentOutputText", + "InputMessagesTemplateTemplateMessageContentInputImage", "InputMessagesItemReference", "SamplingParams", "SamplingParamsResponseFormat", @@ -92,14 +93,32 @@ class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=Fal """The type of the output text. Always `output_text`.""" +class InputMessagesTemplateTemplateMessageContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. 
Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText + str, + ResponseInputTextParam, + InputMessagesTemplateTemplateMessageContentOutputText, + InputMessagesTemplateTemplateMessageContentInputImage, + Iterable[object], ] class InputMessagesTemplateTemplateMessage(TypedDict, total=False): content: Required[InputMessagesTemplateTemplateMessageContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index 12cc868045..7f4f4c9cc4 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. 
diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index 354a81132e..1622b00eb7 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -29,6 +29,7 @@ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference", "DataSourceCreateEvalResponsesRunDataSourceSamplingParams", "DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText", @@ -153,16 +154,34 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEva """The type of the output text. Always `output_text`.""" +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage( + TypedDict, total=False +): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ str, ResponseInputTextParam, DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentInputImage, + Iterable[object], ] class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False): content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index 776ebb413f..fba5321552 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. 
+ """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index 9e2374f93c..e9e445af5c 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index a4f43ce3f9..e13f1abe42 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -32,6 +32,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", "DataSourceResponsesSamplingParamsText", @@ -138,14 +139,32 @@ class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText( """The type of the output text. 
Always `output_text`.""" +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText + str, + ResponseInputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText, + DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentInputImage, + List[object], ] class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/graders/label_model_grader.py b/src/openai/types/graders/label_model_grader.py index d95ccc6df6..76dbfb854a 100644 --- a/src/openai/types/graders/label_model_grader.py +++ b/src/openai/types/graders/label_model_grader.py @@ -6,7 +6,7 @@ from ..._models import BaseModel from ..responses.response_input_text import ResponseInputText -__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(BaseModel): @@ -17,12 +17,26 @@ class InputContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] +class InputContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText, InputContentInputImage, List[object]] class Input(BaseModel): content: InputContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/graders/label_model_grader_param.py b/src/openai/types/graders/label_model_grader_param.py index 76d01421ee..941c8a1bd0 100644 --- a/src/openai/types/graders/label_model_grader_param.py +++ b/src/openai/types/graders/label_model_grader_param.py @@ -7,7 +7,7 @@ from ..responses.response_input_text_param import ResponseInputTextParam -__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(TypedDict, total=False): @@ -18,12 +18,28 @@ class InputContentOutputText(TypedDict, total=False): """The type of the output text. 
Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] +class InputContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[ + str, ResponseInputTextParam, InputContentOutputText, InputContentInputImage, Iterable[object] +] class Input(TypedDict, total=False): content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input. diff --git a/src/openai/types/graders/score_model_grader.py b/src/openai/types/graders/score_model_grader.py index 1349f75a58..e6af0ebcf7 100644 --- a/src/openai/types/graders/score_model_grader.py +++ b/src/openai/types/graders/score_model_grader.py @@ -6,7 +6,7 @@ from ..._models import BaseModel from ..responses.response_input_text import ResponseInputText -__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(BaseModel): @@ -17,12 +17,26 @@ class InputContentOutputText(BaseModel): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] +class InputContentInputImage(BaseModel): + image_url: str + """The URL of the image input.""" + + type: Literal["input_image"] + """The type of the image input. Always `input_image`.""" + + detail: Optional[str] = None + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText, InputContentInputImage, List[object]] class Input(BaseModel): content: InputContent - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Literal["user", "assistant", "system", "developer"] """The role of the message input. diff --git a/src/openai/types/graders/score_model_grader_param.py b/src/openai/types/graders/score_model_grader_param.py index 673f14e47d..47c9928076 100644 --- a/src/openai/types/graders/score_model_grader_param.py +++ b/src/openai/types/graders/score_model_grader_param.py @@ -7,7 +7,7 @@ from ..responses.response_input_text_param import ResponseInputTextParam -__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"] +__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText", "InputContentInputImage"] class InputContentOutputText(TypedDict, total=False): @@ -18,12 +18,28 @@ class InputContentOutputText(TypedDict, total=False): """The type of the output text. Always `output_text`.""" -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] +class InputContentInputImage(TypedDict, total=False): + image_url: Required[str] + """The URL of the image input.""" + + type: Required[Literal["input_image"]] + """The type of the image input. 
Always `input_image`.""" + + detail: str + """The detail level of the image to be sent to the model. + + One of `high`, `low`, or `auto`. Defaults to `auto`. + """ + + +InputContent: TypeAlias = Union[ + str, ResponseInputTextParam, InputContentOutputText, InputContentInputImage, Iterable[object] +] class Input(TypedDict, total=False): content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" + """Inputs to the model - can contain template strings.""" role: Required[Literal["user", "assistant", "system", "developer"]] """The role of the message input.
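Taken together, the realtime changes above are more than cosmetic: conversation items and responses can now report an `in_progress` status, and `previous_item_id` on `conversation.item.created` and `input_audio_buffer.committed` events may be `None` when an item has no predecessor. Below is a minimal sketch of handling both, assuming the event model is imported from `openai.types.beta.realtime` as laid out in this package; the helper function and its wording are illustrative, not part of the SDK.

```python
from openai.types.beta.realtime import ConversationItemCreatedEvent


def describe_created_item(event: ConversationItemCreatedEvent) -> str:
    # previous_item_id is now Optional: None means the item has no predecessor.
    if event.previous_item_id is None:
        position = "at the start of the conversation context"
    else:
        position = f"after item {event.previous_item_id}"

    # status may now also be "in_progress", alongside "completed" and "incomplete".
    status = event.item.status or "status unknown"
    return f"{event.item.type} item created {position} ({status})"
```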
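Most of the remaining diff teaches the eval and grader content unions to accept an `input_image` part. The dicts below mirror the new TypedDict shapes (`image_url`, `type`, optional `detail`); the URL and the template field name are placeholders, so treat this as a payload sketch rather than a complete eval definition.

```python
# A user message whose content is the new input_image part, matching the
# *ContentInputImage TypedDicts added in this release.
image_message = {
    "role": "user",
    "content": {
        "type": "input_image",
        "image_url": "https://example.com/sample-chart.png",  # placeholder URL
        "detail": "low",  # one of "high", "low", or "auto"; omit to default to "auto"
    },
}

# Plain-string content is still accepted and may contain template strings;
# the {{item.transcript}} field name here is illustrative.
text_message = {
    "role": "user",
    "content": "Here is the transcript to grade: {{item.transcript}}",
}
```

Because the unions also gain a generic list arm (`Iterable[object]` on the params side, `List[object]` on the response side), a single message's `content` can in principle be a list mixing text and image parts.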