From 643c1521046f9dd88387ea178261c41316479b27 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 5 Feb 2025 01:27:05 +0000
Subject: [PATCH] feat(api): Add batch API
---
.stats.yml | 2 +-
api.md | 36 ++
src/groq/_client.py | 18 +-
src/groq/resources/__init__.py | 28 ++
src/groq/resources/batches.py | 351 +++++++++++++++
src/groq/resources/files.py | 504 ++++++++++++++++++++++
src/groq/types/__init__.py | 10 +
src/groq/types/batch_create_params.py | 34 ++
src/groq/types/batch_create_response.py | 107 +++++
src/groq/types/batch_list_response.py | 113 +++++
src/groq/types/batch_retrieve_response.py | 107 +++++
src/groq/types/file_content_response.py | 7 +
src/groq/types/file_create_params.py | 20 +
src/groq/types/file_create_response.py | 31 ++
src/groq/types/file_delete_response.py | 15 +
src/groq/types/file_info_response.py | 31 ++
src/groq/types/file_list_response.py | 37 ++
tests/api_resources/test_batches.py | 242 +++++++++++
tests/api_resources/test_files.py | 373 ++++++++++++++++
19 files changed, 2064 insertions(+), 2 deletions(-)
create mode 100644 src/groq/resources/batches.py
create mode 100644 src/groq/resources/files.py
create mode 100644 src/groq/types/batch_create_params.py
create mode 100644 src/groq/types/batch_create_response.py
create mode 100644 src/groq/types/batch_list_response.py
create mode 100644 src/groq/types/batch_retrieve_response.py
create mode 100644 src/groq/types/file_content_response.py
create mode 100644 src/groq/types/file_create_params.py
create mode 100644 src/groq/types/file_create_response.py
create mode 100644 src/groq/types/file_delete_response.py
create mode 100644 src/groq/types/file_info_response.py
create mode 100644 src/groq/types/file_list_response.py
create mode 100644 tests/api_resources/test_batches.py
create mode 100644 tests/api_resources/test_files.py
diff --git a/.stats.yml b/.stats.yml
index 42de645..516dbe0 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
-configured_endpoints: 7
+configured_endpoints: 15
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-d1588e103a6ae0234752b8e54a746fb1e4c93a0ee51ede294017bcd4f0ee4ac0.yml
diff --git a/api.md b/api.md
index 2f00821..b977024 100644
--- a/api.md
+++ b/api.md
@@ -97,3 +97,39 @@ Methods:
- client.models.retrieve(model) -> Model
- client.models.list() -> ModelListResponse
- client.models.delete(model) -> ModelDeleted
+
+# Batches
+
+Types:
+
+```python
+from groq.types import BatchCreateResponse, BatchRetrieveResponse, BatchListResponse
+```
+
+Methods:
+
+- client.batches.create(\*\*params) -> BatchCreateResponse
+- client.batches.retrieve(batch_id) -> BatchRetrieveResponse
+- client.batches.list() -> BatchListResponse
+
+# Files
+
+Types:
+
+```python
+from groq.types import (
+ FileCreateResponse,
+ FileListResponse,
+ FileDeleteResponse,
+ FileContentResponse,
+ FileInfoResponse,
+)
+```
+
+Methods:
+
+- client.files.create(\*\*params) -> FileCreateResponse
+- client.files.list() -> FileListResponse
+- client.files.delete(file_id) -> FileDeleteResponse
+- client.files.content(file_id) -> str
+- client.files.info(file_id) -> FileInfoResponse
diff --git a/src/groq/_client.py b/src/groq/_client.py
index f8d0c19..d77b872 100644
--- a/src/groq/_client.py
+++ b/src/groq/_client.py
@@ -24,7 +24,7 @@
get_async_library,
)
from ._version import __version__
-from .resources import models, embeddings
+from .resources import files, models, batches, embeddings
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import GroqError, APIStatusError
from ._base_client import (
@@ -43,6 +43,8 @@ class Groq(SyncAPIClient):
embeddings: embeddings.Embeddings
audio: audio.Audio
models: models.Models
+ batches: batches.Batches
+ files: files.Files
with_raw_response: GroqWithRawResponse
with_streaming_response: GroqWithStreamedResponse
@@ -104,6 +106,8 @@ def __init__(
self.embeddings = embeddings.Embeddings(self)
self.audio = audio.Audio(self)
self.models = models.Models(self)
+ self.batches = batches.Batches(self)
+ self.files = files.Files(self)
self.with_raw_response = GroqWithRawResponse(self)
self.with_streaming_response = GroqWithStreamedResponse(self)
@@ -217,6 +221,8 @@ class AsyncGroq(AsyncAPIClient):
embeddings: embeddings.AsyncEmbeddings
audio: audio.AsyncAudio
models: models.AsyncModels
+ batches: batches.AsyncBatches
+ files: files.AsyncFiles
with_raw_response: AsyncGroqWithRawResponse
with_streaming_response: AsyncGroqWithStreamedResponse
@@ -278,6 +284,8 @@ def __init__(
self.embeddings = embeddings.AsyncEmbeddings(self)
self.audio = audio.AsyncAudio(self)
self.models = models.AsyncModels(self)
+ self.batches = batches.AsyncBatches(self)
+ self.files = files.AsyncFiles(self)
self.with_raw_response = AsyncGroqWithRawResponse(self)
self.with_streaming_response = AsyncGroqWithStreamedResponse(self)
@@ -392,6 +400,8 @@ def __init__(self, client: Groq) -> None:
self.embeddings = embeddings.EmbeddingsWithRawResponse(client.embeddings)
self.audio = audio.AudioWithRawResponse(client.audio)
self.models = models.ModelsWithRawResponse(client.models)
+ self.batches = batches.BatchesWithRawResponse(client.batches)
+ self.files = files.FilesWithRawResponse(client.files)
class AsyncGroqWithRawResponse:
@@ -400,6 +410,8 @@ def __init__(self, client: AsyncGroq) -> None:
self.embeddings = embeddings.AsyncEmbeddingsWithRawResponse(client.embeddings)
self.audio = audio.AsyncAudioWithRawResponse(client.audio)
self.models = models.AsyncModelsWithRawResponse(client.models)
+ self.batches = batches.AsyncBatchesWithRawResponse(client.batches)
+ self.files = files.AsyncFilesWithRawResponse(client.files)
class GroqWithStreamedResponse:
@@ -408,6 +420,8 @@ def __init__(self, client: Groq) -> None:
self.embeddings = embeddings.EmbeddingsWithStreamingResponse(client.embeddings)
self.audio = audio.AudioWithStreamingResponse(client.audio)
self.models = models.ModelsWithStreamingResponse(client.models)
+ self.batches = batches.BatchesWithStreamingResponse(client.batches)
+ self.files = files.FilesWithStreamingResponse(client.files)
class AsyncGroqWithStreamedResponse:
@@ -416,6 +430,8 @@ def __init__(self, client: AsyncGroq) -> None:
self.embeddings = embeddings.AsyncEmbeddingsWithStreamingResponse(client.embeddings)
self.audio = audio.AsyncAudioWithStreamingResponse(client.audio)
self.models = models.AsyncModelsWithStreamingResponse(client.models)
+ self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches)
+ self.files = files.AsyncFilesWithStreamingResponse(client.files)
Client = Groq
diff --git a/src/groq/resources/__init__.py b/src/groq/resources/__init__.py
index 4f29788..7add5ef 100644
--- a/src/groq/resources/__init__.py
+++ b/src/groq/resources/__init__.py
@@ -16,6 +16,14 @@
AudioWithStreamingResponse,
AsyncAudioWithStreamingResponse,
)
+from .files import (
+ Files,
+ AsyncFiles,
+ FilesWithRawResponse,
+ AsyncFilesWithRawResponse,
+ FilesWithStreamingResponse,
+ AsyncFilesWithStreamingResponse,
+)
from .models import (
Models,
AsyncModels,
@@ -24,6 +32,14 @@
ModelsWithStreamingResponse,
AsyncModelsWithStreamingResponse,
)
+from .batches import (
+ Batches,
+ AsyncBatches,
+ BatchesWithRawResponse,
+ AsyncBatchesWithRawResponse,
+ BatchesWithStreamingResponse,
+ AsyncBatchesWithStreamingResponse,
+)
from .embeddings import (
Embeddings,
AsyncEmbeddings,
@@ -58,4 +74,16 @@
"AsyncModelsWithRawResponse",
"ModelsWithStreamingResponse",
"AsyncModelsWithStreamingResponse",
+ "Batches",
+ "AsyncBatches",
+ "BatchesWithRawResponse",
+ "AsyncBatchesWithRawResponse",
+ "BatchesWithStreamingResponse",
+ "AsyncBatchesWithStreamingResponse",
+ "Files",
+ "AsyncFiles",
+ "FilesWithRawResponse",
+ "AsyncFilesWithRawResponse",
+ "FilesWithStreamingResponse",
+ "AsyncFilesWithStreamingResponse",
]
diff --git a/src/groq/resources/batches.py b/src/groq/resources/batches.py
new file mode 100644
index 0000000..826b8ac
--- /dev/null
+++ b/src/groq/resources/batches.py
@@ -0,0 +1,351 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from ..types import batch_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import (
+ maybe_transform,
+ async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.batch_list_response import BatchListResponse
+from ..types.batch_create_response import BatchCreateResponse
+from ..types.batch_retrieve_response import BatchRetrieveResponse
+
+__all__ = ["Batches", "AsyncBatches"]
+
+
+class Batches(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> BatchesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/groq/groq-python#accessing-raw-response-data-eg-headers
+ """
+ return BatchesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> BatchesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/groq/groq-python#with_streaming_response
+ """
+ return BatchesWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ completion_window: Literal["24h"],
+ endpoint: Literal["/v1/chat/completions"],
+ input_file_id: str,
+ metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BatchCreateResponse:
+ """
+ Creates and executes a batch from an uploaded file of requests
+
+ Args:
+ completion_window: The time frame within which the batch should be processed. Currently only `24h`
+ is supported.
+
+ endpoint: The endpoint to be used for all requests in the batch. Currently
+ `/v1/chat/completions` is supported.
+
+ input_file_id: The ID of an uploaded file that contains requests for the new batch.
+
+ See [upload file](/docs/api-reference#files-upload) for how to upload a file.
+
+ Your input file must be formatted as a [JSONL file](/docs/batch), and must be
+ uploaded with the purpose `batch`. The file can be up to 100 MB in size.
+
+ metadata: Optional custom metadata for the batch.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/openai/v1/batches",
+ body=maybe_transform(
+ {
+ "completion_window": completion_window,
+ "endpoint": endpoint,
+ "input_file_id": input_file_id,
+ "metadata": metadata,
+ },
+ batch_create_params.BatchCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BatchCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ batch_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BatchRetrieveResponse:
+ """
+ Retrieves a batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ return self._get(
+ f"/openai/v1/batches/{batch_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BatchRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BatchListResponse:
+ """List your organization's batches."""
+ return self._get(
+ "/openai/v1/batches",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BatchListResponse,
+ )
+
+
+class AsyncBatches(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncBatchesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/groq/groq-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncBatchesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncBatchesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/groq/groq-python#with_streaming_response
+ """
+ return AsyncBatchesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ completion_window: Literal["24h"],
+ endpoint: Literal["/v1/chat/completions"],
+ input_file_id: str,
+ metadata: Optional[Dict[str, str]] | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BatchCreateResponse:
+ """
+ Creates and executes a batch from an uploaded file of requests
+
+ Args:
+ completion_window: The time frame within which the batch should be processed. Currently only `24h`
+ is supported.
+
+ endpoint: The endpoint to be used for all requests in the batch. Currently
+ `/v1/chat/completions` is supported.
+
+ input_file_id: The ID of an uploaded file that contains requests for the new batch.
+
+ See [upload file](/docs/api-reference#files-upload) for how to upload a file.
+
+ Your input file must be formatted as a [JSONL file](/docs/batch), and must be
+ uploaded with the purpose `batch`. The file can be up to 100 MB in size.
+
+ metadata: Optional custom metadata for the batch.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/openai/v1/batches",
+ body=await async_maybe_transform(
+ {
+ "completion_window": completion_window,
+ "endpoint": endpoint,
+ "input_file_id": input_file_id,
+ "metadata": metadata,
+ },
+ batch_create_params.BatchCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BatchCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ batch_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BatchRetrieveResponse:
+ """
+ Retrieves a batch.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not batch_id:
+ raise ValueError(f"Expected a non-empty value for `batch_id` but received {batch_id!r}")
+ return await self._get(
+ f"/openai/v1/batches/{batch_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BatchRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> BatchListResponse:
+ """List your organization's batches."""
+ return await self._get(
+ "/openai/v1/batches",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=BatchListResponse,
+ )
+
+
+class BatchesWithRawResponse:
+ def __init__(self, batches: Batches) -> None:
+ self._batches = batches
+
+ self.create = to_raw_response_wrapper(
+ batches.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ batches.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ batches.list,
+ )
+
+
+class AsyncBatchesWithRawResponse:
+ def __init__(self, batches: AsyncBatches) -> None:
+ self._batches = batches
+
+ self.create = async_to_raw_response_wrapper(
+ batches.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ batches.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ batches.list,
+ )
+
+
+class BatchesWithStreamingResponse:
+ def __init__(self, batches: Batches) -> None:
+ self._batches = batches
+
+ self.create = to_streamed_response_wrapper(
+ batches.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ batches.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ batches.list,
+ )
+
+
+class AsyncBatchesWithStreamingResponse:
+ def __init__(self, batches: AsyncBatches) -> None:
+ self._batches = batches
+
+ self.create = async_to_streamed_response_wrapper(
+ batches.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ batches.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ batches.list,
+ )
diff --git a/src/groq/resources/files.py b/src/groq/resources/files.py
new file mode 100644
index 0000000..835f207
--- /dev/null
+++ b/src/groq/resources/files.py
@@ -0,0 +1,504 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Mapping, cast
+from typing_extensions import Literal
+
+import httpx
+
+from ..types import file_create_params
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
+from .._utils import (
+ extract_files,
+ maybe_transform,
+ deepcopy_minimal,
+ async_maybe_transform,
+)
+from .._compat import cached_property
+from .._resource import SyncAPIResource, AsyncAPIResource
+from .._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from .._base_client import make_request_options
+from ..types.file_info_response import FileInfoResponse
+from ..types.file_list_response import FileListResponse
+from ..types.file_create_response import FileCreateResponse
+from ..types.file_delete_response import FileDeleteResponse
+
+__all__ = ["Files", "AsyncFiles"]
+
+
+class Files(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> FilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/groq/groq-python#accessing-raw-response-data-eg-headers
+ """
+ return FilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> FilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/groq/groq-python#with_streaming_response
+ """
+ return FilesWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ file: FileTypes,
+ purpose: Literal["batch"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileCreateResponse:
+ """
+ Upload a file that can be used across various endpoints.
+
+ The Batch API only supports `.jsonl` files up to 100 MB in size. The input also
+ has a specific required [format](/docs/batch).
+
+ Please contact us if you need to increase these storage limits.
+
+ Args:
+ file: The File object (not file name) to be uploaded.
+
+ purpose: The intended purpose of the uploaded file. Use "batch" for
+ [Batch API](/docs/api-reference#batches).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "purpose": purpose,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return self._post(
+ "/openai/v1/files",
+ body=maybe_transform(body, file_create_params.FileCreateParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileCreateResponse,
+ )
+
+ def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileListResponse:
+ """Returns a list of files."""
+ return self._get(
+ "/openai/v1/files",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileListResponse,
+ )
+
+ def delete(
+ self,
+ file_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileDeleteResponse:
+ """
+ Delete a file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return self._delete(
+ f"/openai/v1/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileDeleteResponse,
+ )
+
+ def content(
+ self,
+ file_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> str:
+ """
+ Returns the contents of the specified file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return self._get(
+ f"/openai/v1/files/{file_id}/content",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=str,
+ )
+
+ def info(
+ self,
+ file_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileInfoResponse:
+ """
+ Returns information about a file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return self._get(
+ f"/openai/v1/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileInfoResponse,
+ )
+
+
+class AsyncFiles(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncFilesWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/groq/groq-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncFilesWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncFilesWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/groq/groq-python#with_streaming_response
+ """
+ return AsyncFilesWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ file: FileTypes,
+ purpose: Literal["batch"],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileCreateResponse:
+ """
+ Upload a file that can be used across various endpoints.
+
+ The Batch API only supports `.jsonl` files up to 100 MB in size. The input also
+ has a specific required [format](/docs/batch).
+
+ Please contact us if you need to increase these storage limits.
+
+ Args:
+ file: The File object (not file name) to be uploaded.
+
+ purpose: The intended purpose of the uploaded file. Use "batch" for
+ [Batch API](/docs/api-reference#batches).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ body = deepcopy_minimal(
+ {
+ "file": file,
+ "purpose": purpose,
+ }
+ )
+ files = extract_files(cast(Mapping[str, object], body), paths=[["file"]])
+ # It should be noted that the actual Content-Type header that will be
+ # sent to the server will contain a `boundary` parameter, e.g.
+ # multipart/form-data; boundary=---abc--
+ extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
+ return await self._post(
+ "/openai/v1/files",
+ body=await async_maybe_transform(body, file_create_params.FileCreateParams),
+ files=files,
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileCreateResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileListResponse:
+ """Returns a list of files."""
+ return await self._get(
+ "/openai/v1/files",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileListResponse,
+ )
+
+ async def delete(
+ self,
+ file_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileDeleteResponse:
+ """
+ Delete a file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return await self._delete(
+ f"/openai/v1/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileDeleteResponse,
+ )
+
+ async def content(
+ self,
+ file_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> str:
+ """
+ Returns the contents of the specified file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return await self._get(
+ f"/openai/v1/files/{file_id}/content",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=str,
+ )
+
+ async def info(
+ self,
+ file_id: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> FileInfoResponse:
+ """
+ Returns information about a file.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not file_id:
+ raise ValueError(f"Expected a non-empty value for `file_id` but received {file_id!r}")
+ return await self._get(
+ f"/openai/v1/files/{file_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=FileInfoResponse,
+ )
+
+
+class FilesWithRawResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = to_raw_response_wrapper(
+ files.create,
+ )
+ self.list = to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ files.delete,
+ )
+ self.content = to_raw_response_wrapper(
+ files.content,
+ )
+ self.info = to_raw_response_wrapper(
+ files.info,
+ )
+
+
+class AsyncFilesWithRawResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = async_to_raw_response_wrapper(
+ files.create,
+ )
+ self.list = async_to_raw_response_wrapper(
+ files.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ files.delete,
+ )
+ self.content = async_to_raw_response_wrapper(
+ files.content,
+ )
+ self.info = async_to_raw_response_wrapper(
+ files.info,
+ )
+
+
+class FilesWithStreamingResponse:
+ def __init__(self, files: Files) -> None:
+ self._files = files
+
+ self.create = to_streamed_response_wrapper(
+ files.create,
+ )
+ self.list = to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ files.delete,
+ )
+ self.content = to_streamed_response_wrapper(
+ files.content,
+ )
+ self.info = to_streamed_response_wrapper(
+ files.info,
+ )
+
+
+class AsyncFilesWithStreamingResponse:
+ def __init__(self, files: AsyncFiles) -> None:
+ self._files = files
+
+ self.create = async_to_streamed_response_wrapper(
+ files.create,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ files.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ files.delete,
+ )
+ self.content = async_to_streamed_response_wrapper(
+ files.content,
+ )
+ self.info = async_to_streamed_response_wrapper(
+ files.info,
+ )
diff --git a/src/groq/types/__init__.py b/src/groq/types/__init__.py
index bb0aada..116ed55 100644
--- a/src/groq/types/__init__.py
+++ b/src/groq/types/__init__.py
@@ -11,6 +11,16 @@
from .embedding import Embedding as Embedding
from .model_deleted import ModelDeleted as ModelDeleted
from .completion_usage import CompletionUsage as CompletionUsage
+from .file_create_params import FileCreateParams as FileCreateParams
+from .file_info_response import FileInfoResponse as FileInfoResponse
+from .file_list_response import FileListResponse as FileListResponse
+from .batch_create_params import BatchCreateParams as BatchCreateParams
+from .batch_list_response import BatchListResponse as BatchListResponse
from .model_list_response import ModelListResponse as ModelListResponse
+from .file_create_response import FileCreateResponse as FileCreateResponse
+from .file_delete_response import FileDeleteResponse as FileDeleteResponse
+from .batch_create_response import BatchCreateResponse as BatchCreateResponse
+from .file_content_response import FileContentResponse as FileContentResponse
+from .batch_retrieve_response import BatchRetrieveResponse as BatchRetrieveResponse
from .embedding_create_params import EmbeddingCreateParams as EmbeddingCreateParams
from .create_embedding_response import CreateEmbeddingResponse as CreateEmbeddingResponse
diff --git a/src/groq/types/batch_create_params.py b/src/groq/types/batch_create_params.py
new file mode 100644
index 0000000..c1f38a3
--- /dev/null
+++ b/src/groq/types/batch_create_params.py
@@ -0,0 +1,34 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["BatchCreateParams"]
+
+
+class BatchCreateParams(TypedDict, total=False):
+ completion_window: Required[Literal["24h"]]
+ """The time frame within which the batch should be processed.
+
+ Currently only `24h` is supported.
+ """
+
+ endpoint: Required[Literal["/v1/chat/completions"]]
+ """The endpoint to be used for all requests in the batch.
+
+ Currently `/v1/chat/completions` is supported.
+ """
+
+ input_file_id: Required[str]
+ """The ID of an uploaded file that contains requests for the new batch.
+
+ See [upload file](/docs/api-reference#files-upload) for how to upload a file.
+
+ Your input file must be formatted as a [JSONL file](/docs/batch), and must be
+ uploaded with the purpose `batch`. The file can be up to 100 MB in size.
+ """
+
+ metadata: Optional[Dict[str, str]]
+ """Optional custom metadata for the batch."""
diff --git a/src/groq/types/batch_create_response.py b/src/groq/types/batch_create_response.py
new file mode 100644
index 0000000..8f0a8ab
--- /dev/null
+++ b/src/groq/types/batch_create_response.py
@@ -0,0 +1,107 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import builtins
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["BatchCreateResponse", "Errors", "ErrorsData", "RequestCounts"]
+
+
+class ErrorsData(BaseModel):
+ code: Optional[str] = None
+ """An error code identifying the error type."""
+
+ line: Optional[int] = None
+ """The line number of the input file where the error occurred, if applicable."""
+
+ message: Optional[str] = None
+ """A human-readable message providing more details about the error."""
+
+ param: Optional[str] = None
+ """The name of the parameter that caused the error, if applicable."""
+
+
+class Errors(BaseModel):
+ data: Optional[List[ErrorsData]] = None
+
+ object: Optional[str] = None
+ """The object type, which is always `list`."""
+
+
+class RequestCounts(BaseModel):
+ completed: int
+ """Number of requests that have been completed successfully."""
+
+ failed: int
+ """Number of requests that have failed."""
+
+ total: int
+ """Total number of requests in the batch."""
+
+
+class BatchCreateResponse(BaseModel):
+ id: str
+
+ completion_window: str
+ """The time frame within which the batch should be processed."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the batch was created."""
+
+ endpoint: str
+ """The API endpoint used by the batch."""
+
+ input_file_id: str
+ """The ID of the input file for the batch."""
+
+ object: Literal["batch"]
+ """The object type, which is always `batch`."""
+
+ status: Literal[
+ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
+ ]
+ """The current status of the batch."""
+
+ cancelled_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was cancelled."""
+
+ cancelling_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started cancelling."""
+
+ completed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was completed."""
+
+ error_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of requests with errors."""
+
+ errors: Optional[Errors] = None
+
+ expired_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch expired."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch will expire."""
+
+ failed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch failed."""
+
+ finalizing_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started finalizing."""
+
+ in_progress_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started processing."""
+
+ metadata: Optional[builtins.object] = None
+ """Set of key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format.
+ """
+
+ output_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of successfully executed requests."""
+
+ request_counts: Optional[RequestCounts] = None
+ """The request counts for different statuses within the batch."""
diff --git a/src/groq/types/batch_list_response.py b/src/groq/types/batch_list_response.py
new file mode 100644
index 0000000..a6b3389
--- /dev/null
+++ b/src/groq/types/batch_list_response.py
@@ -0,0 +1,113 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import builtins
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["BatchListResponse", "Data", "DataErrors", "DataErrorsData", "DataRequestCounts"]
+
+
+class DataErrorsData(BaseModel):
+ code: Optional[str] = None
+ """An error code identifying the error type."""
+
+ line: Optional[int] = None
+ """The line number of the input file where the error occurred, if applicable."""
+
+ message: Optional[str] = None
+ """A human-readable message providing more details about the error."""
+
+ param: Optional[str] = None
+ """The name of the parameter that caused the error, if applicable."""
+
+
+class DataErrors(BaseModel):
+ data: Optional[List[DataErrorsData]] = None
+
+ object: Optional[str] = None
+ """The object type, which is always `list`."""
+
+
+class DataRequestCounts(BaseModel):
+ completed: int
+ """Number of requests that have been completed successfully."""
+
+ failed: int
+ """Number of requests that have failed."""
+
+ total: int
+ """Total number of requests in the batch."""
+
+
+class Data(BaseModel):
+ id: str
+
+ completion_window: str
+ """The time frame within which the batch should be processed."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the batch was created."""
+
+ endpoint: str
+ """The API endpoint used by the batch."""
+
+ input_file_id: str
+ """The ID of the input file for the batch."""
+
+ object: Literal["batch"]
+ """The object type, which is always `batch`."""
+
+ status: Literal[
+ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
+ ]
+ """The current status of the batch."""
+
+ cancelled_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was cancelled."""
+
+ cancelling_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started cancelling."""
+
+ completed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was completed."""
+
+ error_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of requests with errors."""
+
+ errors: Optional[DataErrors] = None
+
+ expired_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch expired."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch will expire."""
+
+ failed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch failed."""
+
+ finalizing_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started finalizing."""
+
+ in_progress_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started processing."""
+
+ metadata: Optional[builtins.object] = None
+ """Set of key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format.
+ """
+
+ output_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of successfully executed requests."""
+
+ request_counts: Optional[DataRequestCounts] = None
+ """The request counts for different statuses within the batch."""
+
+
+class BatchListResponse(BaseModel):
+ data: List[Data]
+
+ object: Literal["list"]
diff --git a/src/groq/types/batch_retrieve_response.py b/src/groq/types/batch_retrieve_response.py
new file mode 100644
index 0000000..ddc8ab6
--- /dev/null
+++ b/src/groq/types/batch_retrieve_response.py
@@ -0,0 +1,107 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import builtins
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["BatchRetrieveResponse", "Errors", "ErrorsData", "RequestCounts"]
+
+
+class ErrorsData(BaseModel):
+ code: Optional[str] = None
+ """An error code identifying the error type."""
+
+ line: Optional[int] = None
+ """The line number of the input file where the error occurred, if applicable."""
+
+ message: Optional[str] = None
+ """A human-readable message providing more details about the error."""
+
+ param: Optional[str] = None
+ """The name of the parameter that caused the error, if applicable."""
+
+
+class Errors(BaseModel):
+ data: Optional[List[ErrorsData]] = None
+
+ object: Optional[str] = None
+ """The object type, which is always `list`."""
+
+
+class RequestCounts(BaseModel):
+ completed: int
+ """Number of requests that have been completed successfully."""
+
+ failed: int
+ """Number of requests that have failed."""
+
+ total: int
+ """Total number of requests in the batch."""
+
+
+class BatchRetrieveResponse(BaseModel):
+ id: str
+
+ completion_window: str
+ """The time frame within which the batch should be processed."""
+
+ created_at: int
+ """The Unix timestamp (in seconds) for when the batch was created."""
+
+ endpoint: str
+ """The API endpoint used by the batch."""
+
+ input_file_id: str
+ """The ID of the input file for the batch."""
+
+ object: Literal["batch"]
+ """The object type, which is always `batch`."""
+
+ status: Literal[
+ "validating", "failed", "in_progress", "finalizing", "completed", "expired", "cancelling", "cancelled"
+ ]
+ """The current status of the batch."""
+
+ cancelled_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was cancelled."""
+
+ cancelling_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started cancelling."""
+
+ completed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch was completed."""
+
+ error_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of requests with errors."""
+
+ errors: Optional[Errors] = None
+
+ expired_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch expired."""
+
+ expires_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch will expire."""
+
+ failed_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch failed."""
+
+ finalizing_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started finalizing."""
+
+ in_progress_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the batch started processing."""
+
+ metadata: Optional[builtins.object] = None
+ """Set of key-value pairs that can be attached to an object.
+
+ This can be useful for storing additional information about the object in a
+ structured format.
+ """
+
+ output_file_id: Optional[str] = None
+ """The ID of the file containing the outputs of successfully executed requests."""
+
+ request_counts: Optional[RequestCounts] = None
+ """The request counts for different statuses within the batch."""
diff --git a/src/groq/types/file_content_response.py b/src/groq/types/file_content_response.py
new file mode 100644
index 0000000..c7f72a7
--- /dev/null
+++ b/src/groq/types/file_content_response.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import TypeAlias
+
+__all__ = ["FileContentResponse"]
+
+FileContentResponse: TypeAlias = str
diff --git a/src/groq/types/file_create_params.py b/src/groq/types/file_create_params.py
new file mode 100644
index 0000000..54d645d
--- /dev/null
+++ b/src/groq/types/file_create_params.py
@@ -0,0 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from .._types import FileTypes
+
+__all__ = ["FileCreateParams"]
+
+
+class FileCreateParams(TypedDict, total=False):
+ file: Required[FileTypes]
+ """The File object (not file name) to be uploaded."""
+
+ purpose: Required[Literal["batch"]]
+ """
+ The intended purpose of the uploaded file. Use "batch" for
+ [Batch API](/docs/api-reference#batches).
+ """
diff --git a/src/groq/types/file_create_response.py b/src/groq/types/file_create_response.py
new file mode 100644
index 0000000..bc0a0a9
--- /dev/null
+++ b/src/groq/types/file_create_response.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileCreateResponse"]
+
+
+class FileCreateResponse(BaseModel):
+ id: Optional[str] = None
+ """The file identifier, which can be referenced in the API endpoints."""
+
+ bytes: Optional[int] = None
+ """The size of the file, in bytes."""
+
+ created_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the file was created."""
+
+ filename: Optional[str] = None
+ """The name of the file."""
+
+ object: Optional[Literal["file"]] = None
+ """The object type, which is always `file`."""
+
+ purpose: Optional[Literal["batch", "batch_output"]] = None
+ """The intended purpose of the file.
+
+    Supported values are `batch` and `batch_output`.
+ """
diff --git a/src/groq/types/file_delete_response.py b/src/groq/types/file_delete_response.py
new file mode 100644
index 0000000..26e2e05
--- /dev/null
+++ b/src/groq/types/file_delete_response.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileDeleteResponse"]
+
+
+class FileDeleteResponse(BaseModel):
+ id: str
+
+ deleted: bool
+
+ object: Literal["file"]
diff --git a/src/groq/types/file_info_response.py b/src/groq/types/file_info_response.py
new file mode 100644
index 0000000..cfc848e
--- /dev/null
+++ b/src/groq/types/file_info_response.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileInfoResponse"]
+
+
+class FileInfoResponse(BaseModel):
+ id: Optional[str] = None
+ """The file identifier, which can be referenced in the API endpoints."""
+
+ bytes: Optional[int] = None
+ """The size of the file, in bytes."""
+
+ created_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the file was created."""
+
+ filename: Optional[str] = None
+ """The name of the file."""
+
+ object: Optional[Literal["file"]] = None
+ """The object type, which is always `file`."""
+
+ purpose: Optional[Literal["batch", "batch_output"]] = None
+ """The intended purpose of the file.
+
+    Supported values are `batch` and `batch_output`.
+ """
diff --git a/src/groq/types/file_list_response.py b/src/groq/types/file_list_response.py
new file mode 100644
index 0000000..b42d3d0
--- /dev/null
+++ b/src/groq/types/file_list_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["FileListResponse", "Data"]
+
+
+class Data(BaseModel):
+ id: Optional[str] = None
+ """The file identifier, which can be referenced in the API endpoints."""
+
+ bytes: Optional[int] = None
+ """The size of the file, in bytes."""
+
+ created_at: Optional[int] = None
+ """The Unix timestamp (in seconds) for when the file was created."""
+
+ filename: Optional[str] = None
+ """The name of the file."""
+
+ object: Optional[Literal["file"]] = None
+ """The object type, which is always `file`."""
+
+ purpose: Optional[Literal["batch", "batch_output"]] = None
+ """The intended purpose of the file.
+
+    Supported values are `batch` and `batch_output`.
+ """
+
+
+class FileListResponse(BaseModel):
+ data: List[Data]
+
+ object: Literal["list"]
diff --git a/tests/api_resources/test_batches.py b/tests/api_resources/test_batches.py
new file mode 100644
index 0000000..d57304d
--- /dev/null
+++ b/tests/api_resources/test_batches.py
@@ -0,0 +1,242 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from groq import Groq, AsyncGroq
+from groq.types import BatchListResponse, BatchCreateResponse, BatchRetrieveResponse
+from tests.utils import assert_matches_type
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestBatches:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ def test_method_create(self, client: Groq) -> None:
+ batch = client.batches.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ )
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ @parametrize
+ def test_method_create_with_all_params(self, client: Groq) -> None:
+ batch = client.batches.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ metadata={"foo": "string"},
+ )
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_create(self, client: Groq) -> None:
+ response = client.batches.with_raw_response.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = response.parse()
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_create(self, client: Groq) -> None:
+ with client.batches.with_streaming_response.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = response.parse()
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_method_retrieve(self, client: Groq) -> None:
+ batch = client.batches.retrieve(
+ "batch_id",
+ )
+ assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve(self, client: Groq) -> None:
+ response = client.batches.with_raw_response.retrieve(
+ "batch_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = response.parse()
+ assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Groq) -> None:
+ with client.batches.with_streaming_response.retrieve(
+ "batch_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = response.parse()
+ assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve(self, client: Groq) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ client.batches.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ def test_method_list(self, client: Groq) -> None:
+ batch = client.batches.list()
+ assert_matches_type(BatchListResponse, batch, path=["response"])
+
+ @parametrize
+ def test_raw_response_list(self, client: Groq) -> None:
+ response = client.batches.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = response.parse()
+ assert_matches_type(BatchListResponse, batch, path=["response"])
+
+ @parametrize
+ def test_streaming_response_list(self, client: Groq) -> None:
+ with client.batches.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = response.parse()
+ assert_matches_type(BatchListResponse, batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncBatches:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGroq) -> None:
+ batch = await async_client.batches.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ )
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> None:
+ batch = await async_client.batches.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ metadata={"foo": "string"},
+ )
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
+ response = await async_client.batches.with_raw_response.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = await response.parse()
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGroq) -> None:
+ async with async_client.batches.with_streaming_response.create(
+ completion_window="24h",
+ endpoint="/v1/chat/completions",
+ input_file_id="input_file_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = await response.parse()
+ assert_matches_type(BatchCreateResponse, batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGroq) -> None:
+ batch = await async_client.batches.retrieve(
+ "batch_id",
+ )
+ assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGroq) -> None:
+ response = await async_client.batches.with_raw_response.retrieve(
+ "batch_id",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = await response.parse()
+ assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGroq) -> None:
+ async with async_client.batches.with_streaming_response.retrieve(
+ "batch_id",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = await response.parse()
+ assert_matches_type(BatchRetrieveResponse, batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGroq) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `batch_id` but received ''"):
+ await async_client.batches.with_raw_response.retrieve(
+ "",
+ )
+
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGroq) -> None:
+ batch = await async_client.batches.list()
+ assert_matches_type(BatchListResponse, batch, path=["response"])
+
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGroq) -> None:
+ response = await async_client.batches.with_raw_response.list()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ batch = await response.parse()
+ assert_matches_type(BatchListResponse, batch, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGroq) -> None:
+ async with async_client.batches.with_streaming_response.list() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ batch = await response.parse()
+ assert_matches_type(BatchListResponse, batch, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
diff --git a/tests/api_resources/test_files.py b/tests/api_resources/test_files.py
new file mode 100644
index 0000000..5aad426
--- /dev/null
+++ b/tests/api_resources/test_files.py
@@ -0,0 +1,373 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from groq import Groq, AsyncGroq
+from groq.types import (
+ FileInfoResponse,
+ FileListResponse,
+ FileCreateResponse,
+ FileDeleteResponse,
+)
+from tests.utils import assert_matches_type
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
class TestFiles:
    """Synchronous-client coverage for the files resource (create/list/delete/content/info)."""

    parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    def test_method_create(self, client: Groq) -> None:
        """Uploading raw bytes with a purpose yields a typed create response."""
        created = client.files.create(
            file=b"raw file contents",
            purpose="batch",
        )
        assert_matches_type(FileCreateResponse, created, path=["response"])

    @parametrize
    def test_raw_response_create(self, client: Groq) -> None:
        """Raw variant: body is consumed eagerly, so the response arrives closed."""
        response = client.files.with_raw_response.create(
            file=b"raw file contents",
            purpose="batch",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        created = response.parse()
        assert_matches_type(FileCreateResponse, created, path=["response"])

    @parametrize
    def test_streaming_response_create(self, client: Groq) -> None:
        """Streaming variant: open inside the context manager, closed after exit."""
        with client.files.with_streaming_response.create(
            file=b"raw file contents",
            purpose="batch",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            created = response.parse()
            assert_matches_type(FileCreateResponse, created, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_list(self, client: Groq) -> None:
        """Plain method call: listing files returns the typed list payload."""
        listed = client.files.list()
        assert_matches_type(FileListResponse, listed, path=["response"])

    @parametrize
    def test_raw_response_list(self, client: Groq) -> None:
        response = client.files.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        listed = response.parse()
        assert_matches_type(FileListResponse, listed, path=["response"])

    @parametrize
    def test_streaming_response_list(self, client: Groq) -> None:
        with client.files.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            listed = response.parse()
            assert_matches_type(FileListResponse, listed, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_method_delete(self, client: Groq) -> None:
        """Deleting by id returns the typed delete response."""
        deleted = client.files.delete("file_id")
        assert_matches_type(FileDeleteResponse, deleted, path=["response"])

    @parametrize
    def test_raw_response_delete(self, client: Groq) -> None:
        response = client.files.with_raw_response.delete("file_id")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        deleted = response.parse()
        assert_matches_type(FileDeleteResponse, deleted, path=["response"])

    @parametrize
    def test_streaming_response_delete(self, client: Groq) -> None:
        with client.files.with_streaming_response.delete("file_id") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            deleted = response.parse()
            assert_matches_type(FileDeleteResponse, deleted, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_delete(self, client: Groq) -> None:
        """An empty file_id is rejected locally, before any request is sent."""
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            client.files.with_raw_response.delete("")

    @parametrize
    def test_method_content(self, client: Groq) -> None:
        """Fetching file content returns a plain string body."""
        body = client.files.content("file_id")
        assert_matches_type(str, body, path=["response"])

    @parametrize
    def test_raw_response_content(self, client: Groq) -> None:
        response = client.files.with_raw_response.content("file_id")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        body = response.parse()
        assert_matches_type(str, body, path=["response"])

    @parametrize
    def test_streaming_response_content(self, client: Groq) -> None:
        with client.files.with_streaming_response.content("file_id") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            body = response.parse()
            assert_matches_type(str, body, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_content(self, client: Groq) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            client.files.with_raw_response.content("")

    @parametrize
    def test_method_info(self, client: Groq) -> None:
        """Fetching file metadata returns the typed info response."""
        metadata = client.files.info("file_id")
        assert_matches_type(FileInfoResponse, metadata, path=["response"])

    @parametrize
    def test_raw_response_info(self, client: Groq) -> None:
        response = client.files.with_raw_response.info("file_id")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        metadata = response.parse()
        assert_matches_type(FileInfoResponse, metadata, path=["response"])

    @parametrize
    def test_streaming_response_info(self, client: Groq) -> None:
        with client.files.with_streaming_response.info("file_id") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            metadata = response.parse()
            assert_matches_type(FileInfoResponse, metadata, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    def test_path_params_info(self, client: Groq) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            client.files.with_raw_response.info("")
+
+
class TestAsyncFiles:
    """Async-client coverage for the files resource, mirroring TestFiles method-for-method."""

    parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])

    @parametrize
    async def test_method_create(self, async_client: AsyncGroq) -> None:
        """Uploading raw bytes with a purpose yields a typed create response."""
        created = await async_client.files.create(
            file=b"raw file contents",
            purpose="batch",
        )
        assert_matches_type(FileCreateResponse, created, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
        """Raw variant: body is consumed eagerly, so the response arrives closed."""
        response = await async_client.files.with_raw_response.create(
            file=b"raw file contents",
            purpose="batch",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        created = await response.parse()
        assert_matches_type(FileCreateResponse, created, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncGroq) -> None:
        """Streaming variant: open inside the context manager, closed after exit."""
        async with async_client.files.with_streaming_response.create(
            file=b"raw file contents",
            purpose="batch",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            created = await response.parse()
            assert_matches_type(FileCreateResponse, created, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_list(self, async_client: AsyncGroq) -> None:
        """Plain method call: listing files returns the typed list payload."""
        listed = await async_client.files.list()
        assert_matches_type(FileListResponse, listed, path=["response"])

    @parametrize
    async def test_raw_response_list(self, async_client: AsyncGroq) -> None:
        response = await async_client.files.with_raw_response.list()

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        listed = await response.parse()
        assert_matches_type(FileListResponse, listed, path=["response"])

    @parametrize
    async def test_streaming_response_list(self, async_client: AsyncGroq) -> None:
        async with async_client.files.with_streaming_response.list() as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            listed = await response.parse()
            assert_matches_type(FileListResponse, listed, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_method_delete(self, async_client: AsyncGroq) -> None:
        """Deleting by id returns the typed delete response."""
        deleted = await async_client.files.delete("file_id")
        assert_matches_type(FileDeleteResponse, deleted, path=["response"])

    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncGroq) -> None:
        response = await async_client.files.with_raw_response.delete("file_id")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        deleted = await response.parse()
        assert_matches_type(FileDeleteResponse, deleted, path=["response"])

    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncGroq) -> None:
        async with async_client.files.with_streaming_response.delete("file_id") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            deleted = await response.parse()
            assert_matches_type(FileDeleteResponse, deleted, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_delete(self, async_client: AsyncGroq) -> None:
        """An empty file_id is rejected locally, before any request is sent."""
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            await async_client.files.with_raw_response.delete("")

    @parametrize
    async def test_method_content(self, async_client: AsyncGroq) -> None:
        """Fetching file content returns a plain string body."""
        body = await async_client.files.content("file_id")
        assert_matches_type(str, body, path=["response"])

    @parametrize
    async def test_raw_response_content(self, async_client: AsyncGroq) -> None:
        response = await async_client.files.with_raw_response.content("file_id")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        body = await response.parse()
        assert_matches_type(str, body, path=["response"])

    @parametrize
    async def test_streaming_response_content(self, async_client: AsyncGroq) -> None:
        async with async_client.files.with_streaming_response.content("file_id") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            body = await response.parse()
            assert_matches_type(str, body, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_content(self, async_client: AsyncGroq) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            await async_client.files.with_raw_response.content("")

    @parametrize
    async def test_method_info(self, async_client: AsyncGroq) -> None:
        """Fetching file metadata returns the typed info response."""
        metadata = await async_client.files.info("file_id")
        assert_matches_type(FileInfoResponse, metadata, path=["response"])

    @parametrize
    async def test_raw_response_info(self, async_client: AsyncGroq) -> None:
        response = await async_client.files.with_raw_response.info("file_id")

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"

        metadata = await response.parse()
        assert_matches_type(FileInfoResponse, metadata, path=["response"])

    @parametrize
    async def test_streaming_response_info(self, async_client: AsyncGroq) -> None:
        async with async_client.files.with_streaming_response.info("file_id") as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            metadata = await response.parse()
            assert_matches_type(FileInfoResponse, metadata, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_info(self, async_client: AsyncGroq) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            await async_client.files.with_raw_response.info("")