From 0ed39cc6af44c9e02b80ebbee247f27112963b84 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 14 Aug 2024 16:26:14 +0000
Subject: [PATCH 01/14] SDK regeneration
---
.mock/definition/prompts.yml | 12 ++++
reference.md | 47 ++++++++++++
src/label_studio_sdk/__init__.py | 2 +
src/label_studio_sdk/base_client.py | 3 +
src/label_studio_sdk/core/client_wrapper.py | 2 +-
src/label_studio_sdk/prompts/__init__.py | 2 +
src/label_studio_sdk/prompts/client.py | 80 +++++++++++++++++++++
tests/test_prompts.py | 10 +++
8 files changed, 157 insertions(+), 1 deletion(-)
create mode 100644 .mock/definition/prompts.yml
create mode 100644 src/label_studio_sdk/prompts/__init__.py
create mode 100644 src/label_studio_sdk/prompts/client.py
create mode 100644 tests/test_prompts.py
diff --git a/.mock/definition/prompts.yml b/.mock/definition/prompts.yml
new file mode 100644
index 000000000..f535a0c7b
--- /dev/null
+++ b/.mock/definition/prompts.yml
@@ -0,0 +1,12 @@
+service:
+ auth: false
+ base-path: ''
+ endpoints:
+ batch_predictions:
+ path: /api/model-run/batch-predictions/
+ method: POST
+ auth: true
+ examples:
+ - {}
+ audiences:
+ - public
diff --git a/reference.md b/reference.md
index 684bf9038..57550ada3 100644
--- a/reference.md
+++ b/reference.md
@@ -13639,6 +13639,53 @@ client.webhooks.update(
+
+## Prompts
+client.prompts.batch_predictions()
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk.client import LabelStudio
+
+client = LabelStudio(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.batch_predictions()
+
+```
+
+#### ⚙️ Parameters
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py
index e89545133..953e763bf 100644
--- a/src/label_studio_sdk/__init__.py
+++ b/src/label_studio_sdk/__init__.py
@@ -83,6 +83,7 @@
ml,
predictions,
projects,
+ prompts,
tasks,
users,
views,
@@ -267,6 +268,7 @@
"ml",
"predictions",
"projects",
+ "prompts",
"tasks",
"users",
"views",
diff --git a/src/label_studio_sdk/base_client.py b/src/label_studio_sdk/base_client.py
index 8594ad277..383a63efc 100644
--- a/src/label_studio_sdk/base_client.py
+++ b/src/label_studio_sdk/base_client.py
@@ -17,6 +17,7 @@
from .ml.client import AsyncMlClient, MlClient
from .predictions.client import AsyncPredictionsClient, PredictionsClient
from .projects.client import AsyncProjectsClient, ProjectsClient
+from .prompts.client import AsyncPromptsClient, PromptsClient
from .tasks.client import AsyncTasksClient, TasksClient
from .users.client import AsyncUsersClient, UsersClient
from .views.client import AsyncViewsClient, ViewsClient
@@ -98,6 +99,7 @@ def __init__(
self.import_storage = ImportStorageClient(client_wrapper=self._client_wrapper)
self.export_storage = ExportStorageClient(client_wrapper=self._client_wrapper)
self.webhooks = WebhooksClient(client_wrapper=self._client_wrapper)
+ self.prompts = PromptsClient(client_wrapper=self._client_wrapper)
self.comments = CommentsClient(client_wrapper=self._client_wrapper)
self.workspaces = WorkspacesClient(client_wrapper=self._client_wrapper)
@@ -176,6 +178,7 @@ def __init__(
self.import_storage = AsyncImportStorageClient(client_wrapper=self._client_wrapper)
self.export_storage = AsyncExportStorageClient(client_wrapper=self._client_wrapper)
self.webhooks = AsyncWebhooksClient(client_wrapper=self._client_wrapper)
+ self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper)
self.comments = AsyncCommentsClient(client_wrapper=self._client_wrapper)
self.workspaces = AsyncWorkspacesClient(client_wrapper=self._client_wrapper)
diff --git a/src/label_studio_sdk/core/client_wrapper.py b/src/label_studio_sdk/core/client_wrapper.py
index 6676cb579..1c4002b5d 100644
--- a/src/label_studio_sdk/core/client_wrapper.py
+++ b/src/label_studio_sdk/core/client_wrapper.py
@@ -17,7 +17,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "label-studio-sdk",
- "X-Fern-SDK-Version": "1.0.5",
+ "X-Fern-SDK-Version": "1.0.6",
}
headers["Authorization"] = f"Token {self.api_key}"
return headers
diff --git a/src/label_studio_sdk/prompts/__init__.py b/src/label_studio_sdk/prompts/__init__.py
new file mode 100644
index 000000000..f3ea2659b
--- /dev/null
+++ b/src/label_studio_sdk/prompts/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
new file mode 100644
index 000000000..a742e47c0
--- /dev/null
+++ b/src/label_studio_sdk/prompts/client.py
@@ -0,0 +1,80 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.request_options import RequestOptions
+
+
+class PromptsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def batch_predictions(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from label_studio_sdk.client import LabelStudio
+
+ client = LabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.batch_predictions()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "api/model-run/batch-predictions/", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncPromptsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def batch_predictions(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ """
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ from label_studio_sdk.client import AsyncLabelStudio
+
+ client = AsyncLabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ await client.prompts.batch_predictions()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "api/model-run/batch-predictions/", method="POST", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
diff --git a/tests/test_prompts.py b/tests/test_prompts.py
new file mode 100644
index 000000000..9ee1f88fb
--- /dev/null
+++ b/tests/test_prompts.py
@@ -0,0 +1,10 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
+
+
+async def test_batch_predictions(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+ # mypy flags using the return value of a function annotated to return None; ignore that here
+ assert client.prompts.batch_predictions() is None # type: ignore[func-returns-value]
+
+ assert await async_client.prompts.batch_predictions() is None # type: ignore[func-returns-value]
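For orientation, a minimal sketch of calling the endpoint as generated in this first patch (the `base_url` and API key are placeholders; at this stage the method takes no body and returns `None` on success):

```python
from label_studio_sdk.client import LabelStudio
from label_studio_sdk.core.api_error import ApiError

# Placeholder connection details for a hypothetical local instance.
client = LabelStudio(
    base_url="http://localhost:8080",
    api_key="YOUR_API_KEY",
)

try:
    # POSTs to api/model-run/batch-predictions/ with no request body.
    client.prompts.batch_predictions()
except ApiError as e:
    # Non-2xx responses surface as ApiError carrying the raw status and body.
    print(f"batch_predictions failed: {e.status_code} {e.body}")
```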
From 547dac94b5a90fbd26e7454021d8d2cc59aa9f49 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 14 Aug 2024 16:28:49 +0000
Subject: [PATCH 02/14] SDK regeneration
---
.mock/definition/prompts.yml | 22 ++++++-
reference.md | 40 +++++++++++-
src/label_studio_sdk/__init__.py | 2 +
src/label_studio_sdk/prompts/__init__.py | 3 +
src/label_studio_sdk/prompts/client.py | 61 ++++++++++++++++---
.../prompts/types/__init__.py | 5 ++
.../prompts_batch_predictions_response.py | 29 +++++++++
tests/test_prompts.py | 13 +++-
8 files changed, 162 insertions(+), 13 deletions(-)
create mode 100644 src/label_studio_sdk/prompts/types/__init__.py
create mode 100644 src/label_studio_sdk/prompts/types/prompts_batch_predictions_response.py
diff --git a/.mock/definition/prompts.yml b/.mock/definition/prompts.yml
index f535a0c7b..e7d55f5d4 100644
--- a/.mock/definition/prompts.yml
+++ b/.mock/definition/prompts.yml
@@ -1,3 +1,7 @@
+types:
+ PromptsBatchPredictionsResponse:
+ properties:
+ detail: optional<string>
service:
auth: false
base-path: ''
@@ -6,7 +10,23 @@ service:
path: /api/model-run/batch-predictions/
method: POST
auth: true
+ docs: |
+ Create a new batch prediction.
+ display-name: Create batch predictions
+ request:
+ name: PromptsBatchPredictionsRequest
+ body:
+ properties:
+ job_id: optional<string>
+ modelrun_id: optional<integer>
+ results: optional<list<map<string, unknown>>>
+ response:
+ docs: ''
+ type: PromptsBatchPredictionsResponse
examples:
- - {}
+ - request: {}
+ response:
+ body:
+ detail: detail
audiences:
- public
diff --git a/reference.md b/reference.md
index 57550ada3..e75045e0a 100644
--- a/reference.md
+++ b/reference.md
@@ -13644,10 +13644,24 @@ client.webhooks.update(
## Prompts
-client.prompts.batch_predictions()
+client.prompts.batch_predictions(...)
+#### 📝 Description
+
+Create a new batch prediction.
+
#### 🔌 Usage
@@ -13678,6 +13692,30 @@ client.prompts.batch_predictions()
+**job_id:** `typing.Optional[str]`
+
+**modelrun_id:** `typing.Optional[int]`
+
+**results:** `typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]]`
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py
index 953e763bf..a99a2a078 100644
--- a/src/label_studio_sdk/__init__.py
+++ b/src/label_studio_sdk/__init__.py
@@ -116,6 +116,7 @@
MlUpdateResponseAuthMethod,
)
from .projects import ProjectsCreateResponse, ProjectsImportTasksResponse, ProjectsListResponse, ProjectsUpdateResponse
+from .prompts import PromptsBatchPredictionsResponse
from .tasks import TasksListRequestFields, TasksListResponse
from .users import UsersGetTokenResponse, UsersResetTokenResponse
from .version import __version__
@@ -214,6 +215,7 @@
"ProjectsImportTasksResponse",
"ProjectsListResponse",
"ProjectsUpdateResponse",
+ "PromptsBatchPredictionsResponse",
"RedisExportStorage",
"RedisExportStorageStatus",
"RedisImportStorage",
diff --git a/src/label_studio_sdk/prompts/__init__.py b/src/label_studio_sdk/prompts/__init__.py
index f3ea2659b..c12afdbea 100644
--- a/src/label_studio_sdk/prompts/__init__.py
+++ b/src/label_studio_sdk/prompts/__init__.py
@@ -1,2 +1,5 @@
# This file was auto-generated by Fern from our API Definition.
+from .types import PromptsBatchPredictionsResponse
+
+__all__ = ["PromptsBatchPredictionsResponse"]
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
index a742e47c0..e22cd1eb1 100644
--- a/src/label_studio_sdk/prompts/client.py
+++ b/src/label_studio_sdk/prompts/client.py
@@ -5,23 +5,44 @@
from ..core.api_error import ApiError
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
from ..core.request_options import RequestOptions
+from .types.prompts_batch_predictions_response import PromptsBatchPredictionsResponse
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
class PromptsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
- def batch_predictions(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ def batch_predictions(
+ self,
+ *,
+ job_id: typing.Optional[str] = OMIT,
+ modelrun_id: typing.Optional[int] = OMIT,
+ results: typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptsBatchPredictionsResponse:
"""
+ Create a new batch prediction.
+
Parameters
----------
+ job_id : typing.Optional[str]
+
+ modelrun_id : typing.Optional[int]
+
+ results : typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ PromptsBatchPredictionsResponse
+
Examples
--------
@@ -33,11 +54,15 @@ def batch_predictions(self, *, request_options: typing.Optional[RequestOptions]
client.prompts.batch_predictions()
"""
_response = self._client_wrapper.httpx_client.request(
- "api/model-run/batch-predictions/", method="POST", request_options=request_options
+ "api/model-run/batch-predictions/",
+ method="POST",
+ json={"job_id": job_id, "modelrun_id": modelrun_id, "results": results},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return
+ return pydantic_v1.parse_obj_as(PromptsBatchPredictionsResponse, _response.json()) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -48,16 +73,32 @@ class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
- async def batch_predictions(self, *, request_options: typing.Optional[RequestOptions] = None) -> None:
+ async def batch_predictions(
+ self,
+ *,
+ job_id: typing.Optional[str] = OMIT,
+ modelrun_id: typing.Optional[int] = OMIT,
+ results: typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptsBatchPredictionsResponse:
"""
+ Create a new batch prediction.
+
Parameters
----------
+ job_id : typing.Optional[str]
+
+ modelrun_id : typing.Optional[int]
+
+ results : typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]]
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- None
+ PromptsBatchPredictionsResponse
+
Examples
--------
@@ -69,11 +110,15 @@ async def batch_predictions(self, *, request_options: typing.Optional[RequestOpt
await client.prompts.batch_predictions()
"""
_response = await self._client_wrapper.httpx_client.request(
- "api/model-run/batch-predictions/", method="POST", request_options=request_options
+ "api/model-run/batch-predictions/",
+ method="POST",
+ json={"job_id": job_id, "modelrun_id": modelrun_id, "results": results},
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return
+ return pydantic_v1.parse_obj_as(PromptsBatchPredictionsResponse, _response.json()) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
diff --git a/src/label_studio_sdk/prompts/types/__init__.py b/src/label_studio_sdk/prompts/types/__init__.py
new file mode 100644
index 000000000..0d11673b2
--- /dev/null
+++ b/src/label_studio_sdk/prompts/types/__init__.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from .prompts_batch_predictions_response import PromptsBatchPredictionsResponse
+
+__all__ = ["PromptsBatchPredictionsResponse"]
diff --git a/src/label_studio_sdk/prompts/types/prompts_batch_predictions_response.py b/src/label_studio_sdk/prompts/types/prompts_batch_predictions_response.py
new file mode 100644
index 000000000..432b25b29
--- /dev/null
+++ b/src/label_studio_sdk/prompts/types/prompts_batch_predictions_response.py
@@ -0,0 +1,29 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ...core.datetime_utils import serialize_datetime
+from ...core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+
+
+class PromptsBatchPredictionsResponse(pydantic_v1.BaseModel):
+ detail: typing.Optional[str] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
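The generated model overrides `dict()` to merge two serializations; a small sketch of the effect (the field value is illustrative):

```python
from label_studio_sdk.prompts.types import PromptsBatchPredictionsResponse

resp = PromptsBatchPredictionsResponse(detail="ok")

# exclude_unset drops fields that were never assigned; exclude_none drops
# explicit nulls; deep_union_pydantic_dicts merges the two, so a field set
# to None survives while a truly unset one is omitted.
print(resp.dict())  # {'detail': 'ok'}
print(resp.json())  # '{"detail": "ok"}'
```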
diff --git a/tests/test_prompts.py b/tests/test_prompts.py
index 9ee1f88fb..755c0db81 100644
--- a/tests/test_prompts.py
+++ b/tests/test_prompts.py
@@ -1,10 +1,17 @@
# This file was auto-generated by Fern from our API Definition.
+import typing
+
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
+from .utilities import validate_response
+
async def test_batch_predictions(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
- # Type ignore to avoid mypy complaining about the function not being meant to return a value
- assert client.prompts.batch_predictions() is None # type: ignore[func-returns-value]
+ expected_response: typing.Any = {"detail": "detail"}
+ expected_types: typing.Any = {"detail": None}
+ response = client.prompts.batch_predictions()
+ validate_response(response, expected_response, expected_types)
- assert await async_client.prompts.batch_predictions() is None # type: ignore[func-returns-value]
+ async_response = await async_client.prompts.batch_predictions()
+ validate_response(async_response, expected_response, expected_types)
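With this regeneration the endpoint gains a typed request body and response model; a hedged sketch of the new call shape (IDs and the `results` payload are purely illustrative):

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Keyword arguments left at the OMIT sentinel are stripped from the JSON
# body rather than being sent as null.
response = client.prompts.batch_predictions(
    job_id="job-123",  # illustrative; removed again in a later patch
    modelrun_id=42,    # illustrative model-run ID
    results=[{"task": 1, "output": {"label": "A"}}],  # illustrative payload
)

# The response is now parsed into PromptsBatchPredictionsResponse.
print(response.detail)
```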
From af843e7c2aba091c782d25312234ead9a1efd38d Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 14 Aug 2024 19:11:12 +0000
Subject: [PATCH 03/14] SDK regeneration
---
.mock/definition/prompts.yml | 1 -
reference.md | 8 --------
src/label_studio_sdk/prompts/client.py | 10 ++--------
3 files changed, 2 insertions(+), 17 deletions(-)
diff --git a/.mock/definition/prompts.yml b/.mock/definition/prompts.yml
index e7d55f5d4..9cf965078 100644
--- a/.mock/definition/prompts.yml
+++ b/.mock/definition/prompts.yml
@@ -17,7 +17,6 @@ service:
name: PromptsBatchPredictionsRequest
body:
properties:
- job_id: optional<string>
modelrun_id: optional<integer>
results: optional<list<map<string, unknown>>>
response:
diff --git a/reference.md b/reference.md
index e75045e0a..890ab33eb 100644
--- a/reference.md
+++ b/reference.md
@@ -13692,14 +13692,6 @@ client.prompts.batch_predictions()
-**job_id:** `typing.Optional[str]`
-
**modelrun_id:** `typing.Optional[int]`
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
index e22cd1eb1..7c60da6a7 100644
--- a/src/label_studio_sdk/prompts/client.py
+++ b/src/label_studio_sdk/prompts/client.py
@@ -20,7 +20,6 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
def batch_predictions(
self,
*,
- job_id: typing.Optional[str] = OMIT,
modelrun_id: typing.Optional[int] = OMIT,
results: typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]] = OMIT,
request_options: typing.Optional[RequestOptions] = None
@@ -30,8 +29,6 @@ def batch_predictions(
Parameters
----------
- job_id : typing.Optional[str]
-
modelrun_id : typing.Optional[int]
results : typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]]
@@ -56,7 +53,7 @@ def batch_predictions(
_response = self._client_wrapper.httpx_client.request(
"api/model-run/batch-predictions/",
method="POST",
- json={"job_id": job_id, "modelrun_id": modelrun_id, "results": results},
+ json={"modelrun_id": modelrun_id, "results": results},
request_options=request_options,
omit=OMIT,
)
@@ -76,7 +73,6 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper):
async def batch_predictions(
self,
*,
- job_id: typing.Optional[str] = OMIT,
modelrun_id: typing.Optional[int] = OMIT,
results: typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]] = OMIT,
request_options: typing.Optional[RequestOptions] = None
@@ -86,8 +82,6 @@ async def batch_predictions(
Parameters
----------
- job_id : typing.Optional[str]
-
modelrun_id : typing.Optional[int]
results : typing.Optional[typing.Sequence[typing.Dict[str, typing.Any]]]
@@ -112,7 +106,7 @@ async def batch_predictions(
_response = await self._client_wrapper.httpx_client.request(
"api/model-run/batch-predictions/",
method="POST",
- json={"job_id": job_id, "modelrun_id": modelrun_id, "results": results},
+ json={"modelrun_id": modelrun_id, "results": results},
request_options=request_options,
omit=OMIT,
)
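After this patch `job_id` is gone from both clients, so the call narrows to (values still illustrative):

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# job_id is no longer accepted; only modelrun_id and results remain.
response = client.prompts.batch_predictions(
    modelrun_id=42,                                   # illustrative ID
    results=[{"task": 1, "output": {"label": "A"}}],  # illustrative payload
)
print(response.detail)
```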
From 18f3d20b66ed299352131d54f56bc3ddb8fa1d21 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 14 Aug 2024 20:03:39 +0000
Subject: [PATCH 04/14] SDK regeneration
---
.mock/definition/__package__.yml | 217 +++++++++++++++++-
.mock/definition/actions.yml | 28 ++-
.mock/definition/annotations.yml | 2 +
.mock/definition/comments.yml | 2 +
.mock/definition/dataManager.yml | 44 +++-
.../{exportStorage.yml => export_storage.yml} | 4 +
.../azure.yml | 6 +
.../{exportStorage => export_storage}/gcs.yml | 6 +
.../local.yml | 6 +
.../redis.yml | 6 +
.../{exportStorage => export_storage}/s3.yml | 6 +
.../s3S.yml => export_storage/s3s.yml} | 2 +
.mock/definition/files.yml | 2 +
.../{importStorage.yml => import_storage.yml} | 4 +
.../azure.yml | 6 +
.../{importStorage => import_storage}/gcs.yml | 6 +
.../local.yml | 6 +
.../redis.yml | 6 +
.../{importStorage => import_storage}/s3.yml | 6 +
.../s3S.yml => import_storage/s3s.yml} | 2 +
.mock/definition/labels.yml | 4 +
.mock/definition/ml.yml | 14 ++
.mock/definition/organizations.yml | 2 +
.mock/definition/organizations/members.yml | 4 +
.mock/definition/predictions.yml | 2 +
.mock/definition/projects.yml | 10 +
.mock/definition/projects/exports.yml | 2 +
.mock/definition/projects/labels.yml | 4 +
.mock/definition/prompts.yml | 76 +++++-
.mock/definition/tasks.yml | 6 +
.mock/definition/users.yml | 6 +
.mock/definition/views.yml | 42 +++-
.mock/definition/webhooks.yml | 6 +
.mock/definition/workspaces.yml | 2 +
.mock/definition/workspaces/members.yml | 6 +
.mock/fern.config.json | 2 +-
reference.md | 135 +++++++++++
src/label_studio_sdk/__init__.py | 6 +
src/label_studio_sdk/prompts/client.py | 155 +++++++++++++
src/label_studio_sdk/types/__init__.py | 6 +
src/label_studio_sdk/types/prompt.py | 74 ++++++
.../types/prompt_created_by.py | 5 +
.../types/prompt_organization.py | 5 +
tests/test_prompts.py | 72 ++++++
44 files changed, 994 insertions(+), 19 deletions(-)
rename .mock/definition/{exportStorage.yml => export_storage.yml} (90%)
rename .mock/definition/{exportStorage => export_storage}/azure.yml (98%)
rename .mock/definition/{exportStorage => export_storage}/gcs.yml (99%)
rename .mock/definition/{exportStorage => export_storage}/local.yml (98%)
rename .mock/definition/{exportStorage => export_storage}/redis.yml (98%)
rename .mock/definition/{exportStorage => export_storage}/s3.yml (99%)
rename .mock/definition/{exportStorage/s3S.yml => export_storage/s3s.yml} (99%)
rename .mock/definition/{importStorage.yml => import_storage.yml} (90%)
rename .mock/definition/{importStorage => import_storage}/azure.yml (99%)
rename .mock/definition/{importStorage => import_storage}/gcs.yml (99%)
rename .mock/definition/{importStorage => import_storage}/local.yml (98%)
rename .mock/definition/{importStorage => import_storage}/redis.yml (99%)
rename .mock/definition/{importStorage => import_storage}/s3.yml (99%)
rename .mock/definition/{importStorage/s3S.yml => import_storage/s3s.yml} (99%)
create mode 100644 src/label_studio_sdk/types/prompt.py
create mode 100644 src/label_studio_sdk/types/prompt_created_by.py
create mode 100644 src/label_studio_sdk/types/prompt_organization.py
diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml
index ef8b98866..1d45247d0 100644
--- a/.mock/definition/__package__.yml
+++ b/.mock/definition/__package__.yml
@@ -28,6 +28,8 @@ types:
- fixed_and_accepted
- deleted_review
docs: Action which was performed in the last annotation history item
+ source:
+ openapi: openapi/openapi.yaml
Annotation:
properties:
id: optional<integer>
@@ -92,6 +94,8 @@ types:
last_created_by:
type: optional<integer>
docs: User who created the last annotation history item
+ source:
+ openapi: openapi/openapi.yaml
BaseUser:
properties:
id: optional<integer>
@@ -124,6 +128,8 @@ types:
allow_newsletters:
type: optional<boolean>
docs: Allow sending newsletters to user
+ source:
+ openapi: openapi/openapi.yaml
Filter:
properties:
id: optional<integer>
@@ -151,6 +157,8 @@ types:
value:
type: optional
## Prompts
+client.prompts.list()
+
+#### 📝 Description
+
+Get a list of prompts.
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk.client import LabelStudio
+
+client = LabelStudio(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.list()
+
+```
+
+#### ⚙️ Parameters
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+client.prompts.create(...)
+
+#### 📝 Description
+
+Create a new prompt.
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk import Prompt
+from label_studio_sdk.client import LabelStudio
+
+client = LabelStudio(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.create(
+ request=Prompt(
+ title="title",
+ input_fields=["input_fields"],
+ output_classes=["output_classes"],
+ ),
+)
+
+```
+
+#### ⚙️ Parameters
+
+**request:** `Prompt`
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
client.prompts.batch_predictions(...)
-
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py
index a99a2a078..5ccceef41 100644
--- a/src/label_studio_sdk/__init__.py
+++ b/src/label_studio_sdk/__init__.py
@@ -48,6 +48,9 @@
ProjectLabelConfig,
ProjectSampling,
ProjectSkipQueue,
+ Prompt,
+ PromptCreatedBy,
+ PromptOrganization,
RedisExportStorage,
RedisExportStorageStatus,
RedisImportStorage,
@@ -215,6 +218,9 @@
"ProjectsImportTasksResponse",
"ProjectsListResponse",
"ProjectsUpdateResponse",
+ "Prompt",
+ "PromptCreatedBy",
+ "PromptOrganization",
"PromptsBatchPredictionsResponse",
"RedisExportStorage",
"RedisExportStorageStatus",
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
index 7c60da6a7..058782335 100644
--- a/src/label_studio_sdk/prompts/client.py
+++ b/src/label_studio_sdk/prompts/client.py
@@ -7,6 +7,7 @@
from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
from ..core.pydantic_utilities import pydantic_v1
from ..core.request_options import RequestOptions
+from ..types.prompt import Prompt
from .types.prompts_batch_predictions_response import PromptsBatchPredictionsResponse
# this is used as the default value for optional parameters
@@ -17,6 +18,83 @@ class PromptsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Prompt]:
+ """
+ Get a list of prompts.
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[Prompt]
+
+
+ Examples
+ --------
+ from label_studio_sdk.client import LabelStudio
+
+ client = LabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.list()
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "api/prompts/", method="GET", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(typing.List[Prompt], _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def create(self, *, request: Prompt, request_options: typing.Optional[RequestOptions] = None) -> Prompt:
+ """
+ Create a new prompt.
+
+ Parameters
+ ----------
+ request : Prompt
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Prompt
+
+
+ Examples
+ --------
+ from label_studio_sdk import Prompt
+ from label_studio_sdk.client import LabelStudio
+
+ client = LabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.create(
+ request=Prompt(
+ title="title",
+ input_fields=["input_fields"],
+ output_classes=["output_classes"],
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "api/prompts/", method="POST", json=request, request_options=request_options, omit=OMIT
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(Prompt, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def batch_predictions(
self,
*,
@@ -70,6 +148,83 @@ class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
+ async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Prompt]:
+ """
+ Get a list of prompts.
+
+ Parameters
+ ----------
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.List[Prompt]
+
+
+ Examples
+ --------
+ from label_studio_sdk.client import AsyncLabelStudio
+
+ client = AsyncLabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ await client.prompts.list()
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "api/prompts/", method="GET", request_options=request_options
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(typing.List[Prompt], _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def create(self, *, request: Prompt, request_options: typing.Optional[RequestOptions] = None) -> Prompt:
+ """
+ Create a new prompt.
+
+ Parameters
+ ----------
+ request : Prompt
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Prompt
+
+
+ Examples
+ --------
+ from label_studio_sdk import Prompt
+ from label_studio_sdk.client import AsyncLabelStudio
+
+ client = AsyncLabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ await client.prompts.create(
+ request=Prompt(
+ title="title",
+ input_fields=["input_fields"],
+ output_classes=["output_classes"],
+ ),
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "api/prompts/", method="POST", json=request, request_options=request_options, omit=OMIT
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(Prompt, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
async def batch_predictions(
self,
*,
diff --git a/src/label_studio_sdk/types/__init__.py b/src/label_studio_sdk/types/__init__.py
index ed9e52d77..d0b336a29 100644
--- a/src/label_studio_sdk/types/__init__.py
+++ b/src/label_studio_sdk/types/__init__.py
@@ -47,6 +47,9 @@
from .project_label_config import ProjectLabelConfig
from .project_sampling import ProjectSampling
from .project_skip_queue import ProjectSkipQueue
+from .prompt import Prompt
+from .prompt_created_by import PromptCreatedBy
+from .prompt_organization import PromptOrganization
from .redis_export_storage import RedisExportStorage
from .redis_export_storage_status import RedisExportStorageStatus
from .redis_import_storage import RedisImportStorage
@@ -119,6 +122,9 @@
"ProjectLabelConfig",
"ProjectSampling",
"ProjectSkipQueue",
+ "Prompt",
+ "PromptCreatedBy",
+ "PromptOrganization",
"RedisExportStorage",
"RedisExportStorageStatus",
"RedisImportStorage",
diff --git a/src/label_studio_sdk/types/prompt.py b/src/label_studio_sdk/types/prompt.py
new file mode 100644
index 000000000..728cf4949
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt.py
@@ -0,0 +1,74 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .prompt_created_by import PromptCreatedBy
+from .prompt_organization import PromptOrganization
+
+
+class Prompt(pydantic_v1.BaseModel):
+ title: str = pydantic_v1.Field()
+ """
+ Title of the prompt
+ """
+
+ description: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Description of the prompt
+ """
+
+ created_by: typing.Optional[PromptCreatedBy] = pydantic_v1.Field(default=None)
+ """
+ User ID of the creator of the prompt
+ """
+
+ created_at: typing.Optional[dt.datetime] = pydantic_v1.Field(default=None)
+ """
+ Date and time the prompt was created
+ """
+
+ updated_at: typing.Optional[dt.datetime] = pydantic_v1.Field(default=None)
+ """
+ Date and time the prompt was last updated
+ """
+
+ organization: typing.Optional[PromptOrganization] = pydantic_v1.Field(default=None)
+ """
+ Organization ID of the prompt
+ """
+
+ input_fields: typing.List[str] = pydantic_v1.Field()
+ """
+ List of input fields
+ """
+
+ output_classes: typing.List[str] = pydantic_v1.Field()
+ """
+ List of output classes
+ """
+
+ associated_projects: typing.Optional[typing.List[int]] = pydantic_v1.Field(default=None)
+ """
+ List of associated project IDs
+ """
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/label_studio_sdk/types/prompt_created_by.py b/src/label_studio_sdk/types/prompt_created_by.py
new file mode 100644
index 000000000..efe14c6c3
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_created_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptCreatedBy = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/prompt_organization.py b/src/label_studio_sdk/types/prompt_organization.py
new file mode 100644
index 000000000..1f1a1158c
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_organization.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptOrganization = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/tests/test_prompts.py b/tests/test_prompts.py
index 755c0db81..45e217723 100644
--- a/tests/test_prompts.py
+++ b/tests/test_prompts.py
@@ -2,11 +2,83 @@
import typing
+from label_studio_sdk import Prompt
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
from .utilities import validate_response
+async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+ expected_response: typing.Any = [
+ {
+ "title": "title",
+ "description": "description",
+ "created_by": 1,
+ "created_at": "2024-01-15T09:30:00Z",
+ "updated_at": "2024-01-15T09:30:00Z",
+ "organization": 1,
+ "input_fields": ["input_fields"],
+ "output_classes": ["output_classes"],
+ "associated_projects": [1],
+ }
+ ]
+ expected_types: typing.Any = (
+ "list",
+ {
+ 0: {
+ "title": None,
+ "description": None,
+ "created_by": "integer",
+ "created_at": "datetime",
+ "updated_at": "datetime",
+ "organization": "integer",
+ "input_fields": ("list", {0: None}),
+ "output_classes": ("list", {0: None}),
+ "associated_projects": ("list", {0: "integer"}),
+ }
+ },
+ )
+ response = client.prompts.list()
+ validate_response(response, expected_response, expected_types)
+
+ async_response = await async_client.prompts.list()
+ validate_response(async_response, expected_response, expected_types)
+
+
+async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+ expected_response: typing.Any = {
+ "title": "title",
+ "description": "description",
+ "created_by": 1,
+ "created_at": "2024-01-15T09:30:00Z",
+ "updated_at": "2024-01-15T09:30:00Z",
+ "organization": 1,
+ "input_fields": ["input_fields"],
+ "output_classes": ["output_classes"],
+ "associated_projects": [1],
+ }
+ expected_types: typing.Any = {
+ "title": None,
+ "description": None,
+ "created_by": "integer",
+ "created_at": "datetime",
+ "updated_at": "datetime",
+ "organization": "integer",
+ "input_fields": ("list", {0: None}),
+ "output_classes": ("list", {0: None}),
+ "associated_projects": ("list", {0: "integer"}),
+ }
+ response = client.prompts.create(
+ request=Prompt(title="title", input_fields=["input_fields"], output_classes=["output_classes"])
+ )
+ validate_response(response, expected_response, expected_types)
+
+ async_response = await async_client.prompts.create(
+ request=Prompt(title="title", input_fields=["input_fields"], output_classes=["output_classes"])
+ )
+ validate_response(async_response, expected_response, expected_types)
+
+
async def test_batch_predictions(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
expected_response: typing.Any = {"detail": "detail"}
expected_types: typing.Any = {"detail": None}
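Taken together, this patch gives the prompts client a small CRUD surface; a sketch of creating and listing prompts (titles, fields, and classes are placeholders):

```python
from label_studio_sdk import Prompt
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# POST api/prompts/ — the request body is a full Prompt model.
created = client.prompts.create(
    request=Prompt(
        title="Sentiment classifier",             # placeholder title
        input_fields=["text"],                    # placeholder input field
        output_classes=["positive", "negative"],  # placeholder classes
    )
)

# GET api/prompts/ — returns typing.List[Prompt].
for prompt in client.prompts.list():
    print(prompt.title, prompt.created_at)
```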
From f954b187cee94a0399e4129ac4906714d268b630 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 14 Aug 2024 21:37:24 +0000
Subject: [PATCH 05/14] SDK regeneration
---
.mock/definition/__package__.yml | 84 ++++++
.mock/definition/prompts/versions.yml | 89 +++++++
poetry.lock | 10 +-
reference.md | 178 +++++++++++++
src/label_studio_sdk/__init__.py | 18 ++
src/label_studio_sdk/prompts/__init__.py | 3 +-
src/label_studio_sdk/prompts/client.py | 3 +
.../prompts/versions/__init__.py | 2 +
.../prompts/versions/client.py | 245 ++++++++++++++++++
src/label_studio_sdk/types/__init__.py | 18 ++
src/label_studio_sdk/types/prompt_run.py | 46 ++++
.../types/prompt_run_created_by.py | 5 +
.../types/prompt_run_organization.py | 5 +
.../types/prompt_run_project_subset.py | 5 +
.../types/prompt_run_status.py | 5 +
src/label_studio_sdk/types/prompt_version.py | 40 +++
.../types/prompt_version_created_by.py | 5 +
.../types/prompt_version_organization.py | 5 +
.../types/prompt_version_provider.py | 5 +
tests/prompts/__init__.py | 2 +
tests/prompts/test_versions.py | 86 ++++++
21 files changed, 853 insertions(+), 6 deletions(-)
create mode 100644 .mock/definition/prompts/versions.yml
create mode 100644 src/label_studio_sdk/prompts/versions/__init__.py
create mode 100644 src/label_studio_sdk/prompts/versions/client.py
create mode 100644 src/label_studio_sdk/types/prompt_run.py
create mode 100644 src/label_studio_sdk/types/prompt_run_created_by.py
create mode 100644 src/label_studio_sdk/types/prompt_run_organization.py
create mode 100644 src/label_studio_sdk/types/prompt_run_project_subset.py
create mode 100644 src/label_studio_sdk/types/prompt_run_status.py
create mode 100644 src/label_studio_sdk/types/prompt_version.py
create mode 100644 src/label_studio_sdk/types/prompt_version_created_by.py
create mode 100644 src/label_studio_sdk/types/prompt_version_organization.py
create mode 100644 src/label_studio_sdk/types/prompt_version_provider.py
create mode 100644 tests/prompts/__init__.py
create mode 100644 tests/prompts/test_versions.py
diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml
index 1d45247d0..6de890cc2 100644
--- a/.mock/definition/__package__.yml
+++ b/.mock/definition/__package__.yml
@@ -2307,6 +2307,90 @@ types:
docs: List of associated project IDs
source:
openapi: openapi/openapi.yaml
+ PromptVersionProvider:
+ enum:
+ - OpenAI
+ - AzureOpenAI
+ source:
+ openapi: openapi/openapi.yaml
+ PromptVersionCreatedBy:
+ discriminated: false
+ union:
+ - integer
+ - map<string, unknown>
+ source:
+ openapi: openapi/openapi.yaml
+ PromptVersionOrganization:
+ discriminated: false
+ union:
+ - integer
+ - map<string, unknown>
+ source:
+ openapi: openapi/openapi.yaml
+ PromptVersion:
+ properties:
+ title:
+ type: string
+ validation:
+ maxLength: 500
+ parent_model: optional<integer>
+ prompt: string
+ provider: PromptVersionProvider
+ provider_model_id: string
+ created_by: optional<PromptVersionCreatedBy>
+ created_at: optional<datetime>
+ updated_at: optional<datetime>
+ organization: optional<PromptVersionOrganization>
+ source:
+ openapi: openapi/openapi.yaml
+ PromptRunOrganization:
+ discriminated: false
+ union:
+ - integer
+ - map<string, unknown>
+ source:
+ openapi: openapi/openapi.yaml
+ PromptRunCreatedBy:
+ discriminated: false
+ union:
+ - integer
+ - map<string, unknown>
+ source:
+ openapi: openapi/openapi.yaml
+ PromptRunProjectSubset:
+ enum:
+ - All
+ - HasGT
+ - Sample
+ source:
+ openapi: openapi/openapi.yaml
+ PromptRunStatus:
+ enum:
+ - Pending
+ - InProgress
+ - Completed
+ - Failed
+ - Canceled
+ source:
+ openapi: openapi/openapi.yaml
+ PromptRun:
+ properties:
+ organization: optional<PromptRunOrganization>
+ project: integer
+ model_version: optional<string>
+ created_by: optional<PromptRunCreatedBy>
+ project_subset: PromptRunProjectSubset
+ status: optional<PromptRunStatus>
+ job_id: optional<string>
+ total_predictions: optional<integer>
+ total_correct_predictions: optional<integer>
+ total_tasks: optional<integer>
+ created_at: optional<datetime>
+ triggered_at: optional<datetime>
+ predictions_updated_at: optional<datetime>
+ completed_at: optional<datetime>
+ source:
+ openapi: openapi/openapi.yaml
CommentCreatedBy:
discriminated: false
union:
diff --git a/.mock/definition/prompts/versions.yml b/.mock/definition/prompts/versions.yml
new file mode 100644
index 000000000..5c44f0e89
--- /dev/null
+++ b/.mock/definition/prompts/versions.yml
@@ -0,0 +1,89 @@
+imports:
+ root: ../__package__.yml
+service:
+ auth: false
+ base-path: ''
+ endpoints:
+ create:
+ path: /api/prompts/{id}/versions
+ method: POST
+ auth: true
+ docs: |
+ Create a new version of a prompt.
+ path-parameters:
+ id:
+ type: integer
+ docs: Prompt ID
+ display-name: Create prompt version
+ request:
+ body: root.PromptVersion
+ response:
+ docs: ''
+ type: root.PromptVersion
+ examples:
+ - path-parameters:
+ id: 1
+ request:
+ title: title
+ prompt: prompt
+ provider: OpenAI
+ provider_model_id: provider_model_id
+ response:
+ body:
+ title: title
+ parent_model: 1
+ prompt: prompt
+ provider: OpenAI
+ provider_model_id: provider_model_id
+ created_by: 1
+ created_at: '2024-01-15T09:30:00Z'
+ updated_at: '2024-01-15T09:30:00Z'
+ organization: 1
+ audiences:
+ - public
+ run:
+ path: /api/prompts/{id}/versions/{version_id}/inference-runs
+ method: POST
+ auth: true
+ docs: |
+ Run a prompt.
+ path-parameters:
+ id:
+ type: integer
+ docs: Prompt ID
+ version_id:
+ type: integer
+ docs: Prompt Version ID
+ display-name: Run prompt
+ request:
+ body: root.PromptRun
+ response:
+ docs: ''
+ type: root.PromptRun
+ examples:
+ - path-parameters:
+ id: 1
+ version_id: 1
+ request:
+ project: 1
+ project_subset: All
+ response:
+ body:
+ organization: 1
+ project: 1
+ model_version: model_version
+ created_by: 1
+ project_subset: All
+ status: Pending
+ job_id: job_id
+ total_predictions: 1
+ total_correct_predictions: 1
+ total_tasks: 1
+ created_at: '2024-01-15T09:30:00Z'
+ triggered_at: '2024-01-15T09:30:00Z'
+ predictions_updated_at: '2024-01-15T09:30:00Z'
+ completed_at: '2024-01-15T09:30:00Z'
+ audiences:
+ - public
+ source:
+ openapi: openapi/openapi.yaml
diff --git a/poetry.lock b/poetry.lock
index fe476df59..6690174bd 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -387,21 +387,21 @@ files = [
[[package]]
name = "importlib-resources"
-version = "6.4.0"
+version = "6.4.2"
description = "Read resources from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_resources-6.4.0-py3-none-any.whl", hash = "sha256:50d10f043df931902d4194ea07ec57960f66a80449ff867bfe782b4c486ba78c"},
- {file = "importlib_resources-6.4.0.tar.gz", hash = "sha256:cdb2b453b8046ca4e3798eb1d84f3cce1446a0e8e7b5ef4efb600f19fc398145"},
+ {file = "importlib_resources-6.4.2-py3-none-any.whl", hash = "sha256:8bba8c54a8a3afaa1419910845fa26ebd706dc716dd208d9b158b4b6966f5c5c"},
+ {file = "importlib_resources-6.4.2.tar.gz", hash = "sha256:6cbfbefc449cc6e2095dd184691b7a12a04f40bc75dd4c55d31c34f174cdf57a"},
]
[package.dependencies]
zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["jaraco.test (>=5.4)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"]
[[package]]
name = "iniconfig"
diff --git a/reference.md b/reference.md
index 401097478..0fe5b5fa1 100644
--- a/reference.md
+++ b/reference.md
@@ -13851,6 +13851,184 @@ client.prompts.batch_predictions()
+
+## Prompts Versions
+client.prompts.versions.create(...)
+
+#### 📝 Description
+
+Create a new version of a prompt.
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk import PromptVersion
+from label_studio_sdk.client import LabelStudio
+
+client = LabelStudio(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.versions.create(
+ id=1,
+ request=PromptVersion(
+ title="title",
+ prompt="prompt",
+ provider="OpenAI",
+ provider_model_id="provider_model_id",
+ ),
+)
+
+```
+
+#### ⚙️ Parameters
+
+**id:** `int` — Prompt ID
+
+**request:** `PromptVersion`
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+client.prompts.versions.run(...)
+
+#### 📝 Description
+
+Run a prompt.
+
+#### 🔌 Usage
+
+```python
+from label_studio_sdk import PromptRun
+from label_studio_sdk.client import LabelStudio
+
+client = LabelStudio(
+ api_key="YOUR_API_KEY",
+)
+client.prompts.versions.run(
+ id=1,
+ version_id=1,
+ request=PromptRun(
+ project=1,
+ project_subset="All",
+ ),
+)
+
+```
+
+#### ⚙️ Parameters
+
+**id:** `int` — Prompt ID
+
+**version_id:** `int` — Prompt Version ID
+
+**request:** `PromptRun`
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py
index 5ccceef41..7bc684417 100644
--- a/src/label_studio_sdk/__init__.py
+++ b/src/label_studio_sdk/__init__.py
@@ -51,6 +51,15 @@
Prompt,
PromptCreatedBy,
PromptOrganization,
+ PromptRun,
+ PromptRunCreatedBy,
+ PromptRunOrganization,
+ PromptRunProjectSubset,
+ PromptRunStatus,
+ PromptVersion,
+ PromptVersionCreatedBy,
+ PromptVersionOrganization,
+ PromptVersionProvider,
RedisExportStorage,
RedisExportStorageStatus,
RedisImportStorage,
@@ -221,6 +230,15 @@
"Prompt",
"PromptCreatedBy",
"PromptOrganization",
+ "PromptRun",
+ "PromptRunCreatedBy",
+ "PromptRunOrganization",
+ "PromptRunProjectSubset",
+ "PromptRunStatus",
+ "PromptVersion",
+ "PromptVersionCreatedBy",
+ "PromptVersionOrganization",
+ "PromptVersionProvider",
"PromptsBatchPredictionsResponse",
"RedisExportStorage",
"RedisExportStorageStatus",
diff --git a/src/label_studio_sdk/prompts/__init__.py b/src/label_studio_sdk/prompts/__init__.py
index c12afdbea..99b6e8137 100644
--- a/src/label_studio_sdk/prompts/__init__.py
+++ b/src/label_studio_sdk/prompts/__init__.py
@@ -1,5 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
from .types import PromptsBatchPredictionsResponse
+from . import versions
-__all__ = ["PromptsBatchPredictionsResponse"]
+__all__ = ["PromptsBatchPredictionsResponse", "versions"]
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
index 058782335..db7db44ed 100644
--- a/src/label_studio_sdk/prompts/client.py
+++ b/src/label_studio_sdk/prompts/client.py
@@ -9,6 +9,7 @@
from ..core.request_options import RequestOptions
from ..types.prompt import Prompt
from .types.prompts_batch_predictions_response import PromptsBatchPredictionsResponse
+from .versions.client import AsyncVersionsClient, VersionsClient
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -17,6 +18,7 @@
class PromptsClient:
def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
+ self.versions = VersionsClient(client_wrapper=self._client_wrapper)
def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Prompt]:
"""
@@ -147,6 +149,7 @@ def batch_predictions(
class AsyncPromptsClient:
def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
+ self.versions = AsyncVersionsClient(client_wrapper=self._client_wrapper)
async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[Prompt]:
"""
diff --git a/src/label_studio_sdk/prompts/versions/__init__.py b/src/label_studio_sdk/prompts/versions/__init__.py
new file mode 100644
index 000000000..f3ea2659b
--- /dev/null
+++ b/src/label_studio_sdk/prompts/versions/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/label_studio_sdk/prompts/versions/client.py b/src/label_studio_sdk/prompts/versions/client.py
new file mode 100644
index 000000000..20a88935b
--- /dev/null
+++ b/src/label_studio_sdk/prompts/versions/client.py
@@ -0,0 +1,245 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from json.decoder import JSONDecodeError
+
+from ...core.api_error import ApiError
+from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ...core.jsonable_encoder import jsonable_encoder
+from ...core.pydantic_utilities import pydantic_v1
+from ...core.request_options import RequestOptions
+from ...types.prompt_run import PromptRun
+from ...types.prompt_version import PromptVersion
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class VersionsClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def create(
+ self, id: int, *, request: PromptVersion, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptVersion:
+ """
+ Create a new version of a prompt.
+
+ Parameters
+ ----------
+ id : int
+ Prompt ID
+
+ request : PromptVersion
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptVersion
+
+
+ Examples
+ --------
+ from label_studio_sdk import PromptVersion
+ from label_studio_sdk.client import LabelStudio
+
+ client = LabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.versions.create(
+ id=1,
+ request=PromptVersion(
+ title="title",
+ prompt="prompt",
+ provider="OpenAI",
+ provider_model_id="provider_model_id",
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"api/prompts/{jsonable_encoder(id)}/versions",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(PromptVersion, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ def run(
+ self, id: int, version_id: int, *, request: PromptRun, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptRun:
+ """
+ Run a prompt.
+
+ Parameters
+ ----------
+ id : int
+ Prompt ID
+
+ version_id : int
+ Prompt Version ID
+
+ request : PromptRun
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptRun
+
+
+ Examples
+ --------
+ from label_studio_sdk import PromptRun
+ from label_studio_sdk.client import LabelStudio
+
+ client = LabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ client.prompts.versions.run(
+ id=1,
+ version_id=1,
+ request=PromptRun(
+ project=1,
+ project_subset="All",
+ ),
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(PromptRun, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncVersionsClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def create(
+ self, id: int, *, request: PromptVersion, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptVersion:
+ """
+ Create a new version of a prompt.
+
+ Parameters
+ ----------
+ id : int
+ Prompt ID
+
+ request : PromptVersion
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptVersion
+
+
+ Examples
+ --------
+ from label_studio_sdk import PromptVersion
+ from label_studio_sdk.client import AsyncLabelStudio
+
+ client = AsyncLabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ await client.prompts.versions.create(
+ id=1,
+ request=PromptVersion(
+ title="title",
+ prompt="prompt",
+ provider="OpenAI",
+ provider_model_id="provider_model_id",
+ ),
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"api/prompts/{jsonable_encoder(id)}/versions",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(PromptVersion, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def run(
+ self, id: int, version_id: int, *, request: PromptRun, request_options: typing.Optional[RequestOptions] = None
+ ) -> PromptRun:
+ """
+ Run a prompt.
+
+ Parameters
+ ----------
+ id : int
+ Prompt ID
+
+ version_id : int
+ Prompt Version ID
+
+ request : PromptRun
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ PromptRun
+
+
+ Examples
+ --------
+ from label_studio_sdk import PromptRun
+ from label_studio_sdk.client import AsyncLabelStudio
+
+ client = AsyncLabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ await client.prompts.versions.run(
+ id=1,
+ version_id=1,
+ request=PromptRun(
+ project=1,
+ project_subset="All",
+ ),
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
+ method="POST",
+ json=request,
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(PromptRun, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
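A sketch of how the new sub-client chains with the rest of the SDK, creating a version and then triggering an inference run (the prompt ID, version ID, project ID, and model identifier are placeholders):

```python
from label_studio_sdk import PromptRun, PromptVersion
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# POST api/prompts/{id}/versions
version = client.prompts.versions.create(
    id=1,  # placeholder prompt ID
    request=PromptVersion(
        title="v1",
        prompt="Classify the sentiment of the given text.",
        provider="OpenAI",
        provider_model_id="gpt-4o-mini",  # placeholder model identifier
    ),
)

# POST api/prompts/{id}/versions/{version_id}/inference-runs
run = client.prompts.versions.run(
    id=1,
    version_id=1,  # placeholder; in practice, the created version's ID
    request=PromptRun(project=1, project_subset="All"),
)
print(run.status, run.job_id)
```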
diff --git a/src/label_studio_sdk/types/__init__.py b/src/label_studio_sdk/types/__init__.py
index d0b336a29..b92018493 100644
--- a/src/label_studio_sdk/types/__init__.py
+++ b/src/label_studio_sdk/types/__init__.py
@@ -50,6 +50,15 @@
from .prompt import Prompt
from .prompt_created_by import PromptCreatedBy
from .prompt_organization import PromptOrganization
+from .prompt_run import PromptRun
+from .prompt_run_created_by import PromptRunCreatedBy
+from .prompt_run_organization import PromptRunOrganization
+from .prompt_run_project_subset import PromptRunProjectSubset
+from .prompt_run_status import PromptRunStatus
+from .prompt_version import PromptVersion
+from .prompt_version_created_by import PromptVersionCreatedBy
+from .prompt_version_organization import PromptVersionOrganization
+from .prompt_version_provider import PromptVersionProvider
from .redis_export_storage import RedisExportStorage
from .redis_export_storage_status import RedisExportStorageStatus
from .redis_import_storage import RedisImportStorage
@@ -125,6 +134,15 @@
"Prompt",
"PromptCreatedBy",
"PromptOrganization",
+ "PromptRun",
+ "PromptRunCreatedBy",
+ "PromptRunOrganization",
+ "PromptRunProjectSubset",
+ "PromptRunStatus",
+ "PromptVersion",
+ "PromptVersionCreatedBy",
+ "PromptVersionOrganization",
+ "PromptVersionProvider",
"RedisExportStorage",
"RedisExportStorageStatus",
"RedisImportStorage",
diff --git a/src/label_studio_sdk/types/prompt_run.py b/src/label_studio_sdk/types/prompt_run.py
new file mode 100644
index 000000000..113b32247
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_run.py
@@ -0,0 +1,46 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .prompt_run_created_by import PromptRunCreatedBy
+from .prompt_run_organization import PromptRunOrganization
+from .prompt_run_project_subset import PromptRunProjectSubset
+from .prompt_run_status import PromptRunStatus
+
+
+class PromptRun(pydantic_v1.BaseModel):
+ organization: typing.Optional[PromptRunOrganization] = None
+ project: int
+ model_version: typing.Optional[str] = None
+ created_by: typing.Optional[PromptRunCreatedBy] = None
+ project_subset: PromptRunProjectSubset
+ status: typing.Optional[PromptRunStatus] = None
+ job_id: typing.Optional[str] = None
+ total_predictions: typing.Optional[int] = None
+ total_correct_predictions: typing.Optional[int] = None
+ total_tasks: typing.Optional[int] = None
+ created_at: typing.Optional[dt.datetime] = None
+ triggered_at: typing.Optional[dt.datetime] = None
+ predictions_updated_at: typing.Optional[dt.datetime] = None
+ completed_at: typing.Optional[dt.datetime] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
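
The overridden `json()` and `dict()` above keep unset optional fields out of serialized payloads, so a request body carries only what the caller actually set. A small sketch of the effect:

```python
from label_studio_sdk import PromptRun

run = PromptRun(project=1, project_subset="All")

# Only the two fields that were set are serialized; the optional status,
# job_id, timestamp, and counter fields are omitted entirely rather than
# being sent as null.
print(run.json())  # {"project": 1, "project_subset": "All"}
```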
diff --git a/src/label_studio_sdk/types/prompt_run_created_by.py b/src/label_studio_sdk/types/prompt_run_created_by.py
new file mode 100644
index 000000000..59a4ff902
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_run_created_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptRunCreatedBy = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/prompt_run_organization.py b/src/label_studio_sdk/types/prompt_run_organization.py
new file mode 100644
index 000000000..6938a05af
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_run_organization.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptRunOrganization = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/prompt_run_project_subset.py b/src/label_studio_sdk/types/prompt_run_project_subset.py
new file mode 100644
index 000000000..f68617178
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_run_project_subset.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptRunProjectSubset = typing.Union[typing.Literal["All", "HasGT", "Sample"], typing.Any]
diff --git a/src/label_studio_sdk/types/prompt_run_status.py b/src/label_studio_sdk/types/prompt_run_status.py
new file mode 100644
index 000000000..83c72b472
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_run_status.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptRunStatus = typing.Union[typing.Literal["Pending", "InProgress", "Completed", "Failed", "Canceled"], typing.Any]
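
Both of these aliases use the same `Union[Literal[...], Any]` shape: the `Literal` arm documents and type-checks the known values, while the `Any` arm lets values the server adds later pass through without breaking older SDKs. A sketch of how a type checker treats it:

```python
import typing

Status = typing.Union[
    typing.Literal["Pending", "InProgress", "Completed", "Failed", "Canceled"],
    typing.Any,
]

status: Status = "Pending"   # matches the Literal arm
status = "Archived"          # unknown value, still accepted via the Any arm
```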
diff --git a/src/label_studio_sdk/types/prompt_version.py b/src/label_studio_sdk/types/prompt_version.py
new file mode 100644
index 000000000..786f39b51
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_version.py
@@ -0,0 +1,40 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .prompt_version_created_by import PromptVersionCreatedBy
+from .prompt_version_organization import PromptVersionOrganization
+from .prompt_version_provider import PromptVersionProvider
+
+
+class PromptVersion(pydantic_v1.BaseModel):
+ title: str
+ parent_model: typing.Optional[int] = None
+ prompt: str
+ provider: PromptVersionProvider
+ provider_model_id: str
+ created_by: typing.Optional[PromptVersionCreatedBy] = None
+ created_at: typing.Optional[dt.datetime] = None
+ updated_at: typing.Optional[dt.datetime] = None
+ organization: typing.Optional[PromptVersionOrganization] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
diff --git a/src/label_studio_sdk/types/prompt_version_created_by.py b/src/label_studio_sdk/types/prompt_version_created_by.py
new file mode 100644
index 000000000..a0e0d8668
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_version_created_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptVersionCreatedBy = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/prompt_version_organization.py b/src/label_studio_sdk/types/prompt_version_organization.py
new file mode 100644
index 000000000..28c02e65d
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_version_organization.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptVersionOrganization = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/prompt_version_provider.py b/src/label_studio_sdk/types/prompt_version_provider.py
new file mode 100644
index 000000000..82213666c
--- /dev/null
+++ b/src/label_studio_sdk/types/prompt_version_provider.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+PromptVersionProvider = typing.Union[typing.Literal["OpenAI", "AzureOpenAI"], typing.Any]
diff --git a/tests/prompts/__init__.py b/tests/prompts/__init__.py
new file mode 100644
index 000000000..f3ea2659b
--- /dev/null
+++ b/tests/prompts/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/tests/prompts/test_versions.py b/tests/prompts/test_versions.py
new file mode 100644
index 000000000..0243980bc
--- /dev/null
+++ b/tests/prompts/test_versions.py
@@ -0,0 +1,86 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from label_studio_sdk import PromptRun, PromptVersion
+from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
+
+from ..utilities import validate_response
+
+
+async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+ expected_response: typing.Any = {
+ "title": "title",
+ "parent_model": 1,
+ "prompt": "prompt",
+ "provider": "OpenAI",
+ "provider_model_id": "provider_model_id",
+ "created_by": 1,
+ "created_at": "2024-01-15T09:30:00Z",
+ "updated_at": "2024-01-15T09:30:00Z",
+ "organization": 1,
+ }
+ expected_types: typing.Any = {
+ "title": None,
+ "parent_model": "integer",
+ "prompt": None,
+ "provider": None,
+ "provider_model_id": None,
+ "created_by": "integer",
+ "created_at": "datetime",
+ "updated_at": "datetime",
+ "organization": "integer",
+ }
+ response = client.prompts.versions.create(
+ id=1,
+ request=PromptVersion(title="title", prompt="prompt", provider="OpenAI", provider_model_id="provider_model_id"),
+ )
+ validate_response(response, expected_response, expected_types)
+
+ async_response = await async_client.prompts.versions.create(
+ id=1,
+ request=PromptVersion(title="title", prompt="prompt", provider="OpenAI", provider_model_id="provider_model_id"),
+ )
+ validate_response(async_response, expected_response, expected_types)
+
+
+async def test_run(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+ expected_response: typing.Any = {
+ "organization": 1,
+ "project": 1,
+ "model_version": "model_version",
+ "created_by": 1,
+ "project_subset": "All",
+ "status": "Pending",
+ "job_id": "job_id",
+ "total_predictions": 1,
+ "total_correct_predictions": 1,
+ "total_tasks": 1,
+ "created_at": "2024-01-15T09:30:00Z",
+ "triggered_at": "2024-01-15T09:30:00Z",
+ "predictions_updated_at": "2024-01-15T09:30:00Z",
+ "completed_at": "2024-01-15T09:30:00Z",
+ }
+ expected_types: typing.Any = {
+ "organization": "integer",
+ "project": "integer",
+ "model_version": None,
+ "created_by": "integer",
+ "project_subset": None,
+ "status": None,
+ "job_id": None,
+ "total_predictions": "integer",
+ "total_correct_predictions": "integer",
+ "total_tasks": "integer",
+ "created_at": "datetime",
+ "triggered_at": "datetime",
+ "predictions_updated_at": "datetime",
+ "completed_at": "datetime",
+ }
+ response = client.prompts.versions.run(id=1, version_id=1, request=PromptRun(project=1, project_subset="All"))
+ validate_response(response, expected_response, expected_types)
+
+ async_response = await async_client.prompts.versions.run(
+ id=1, version_id=1, request=PromptRun(project=1, project_subset="All")
+ )
+ validate_response(async_response, expected_response, expected_types)
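
These tests call the sync and async clients from a single coroutine, so they presumably rely on client fixtures and an async pytest plugin defined elsewhere in the repo; neither is part of this patch. A hypothetical sketch of what such fixtures could look like (names, base URL, and plugin choice are assumptions):

```python
# conftest.py -- hypothetical; the repo's real fixtures are not shown here.
import pytest

from label_studio_sdk.client import AsyncLabelStudio, LabelStudio


@pytest.fixture
def client() -> LabelStudio:
    return LabelStudio(api_key="YOUR_API_KEY", base_url="http://localhost:8080")


@pytest.fixture
def async_client() -> AsyncLabelStudio:
    return AsyncLabelStudio(api_key="YOUR_API_KEY", base_url="http://localhost:8080")
```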
From 200473a7a3ece9696feb9a61b5a7974d44231b86 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 14 Aug 2024 21:45:31 +0000
Subject: [PATCH 06/14] SDK regeneration
---
.mock/definition/prompts/versions.yml | 2 +-
reference.md | 4 ++--
src/label_studio_sdk/prompts/versions/client.py | 8 ++++----
tests/prompts/test_versions.py | 8 +++++---
4 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/.mock/definition/prompts/versions.yml b/.mock/definition/prompts/versions.yml
index 5c44f0e89..1da2fb876 100644
--- a/.mock/definition/prompts/versions.yml
+++ b/.mock/definition/prompts/versions.yml
@@ -41,7 +41,7 @@ service:
organization: 1
audiences:
- public
- run:
+ create_run:
path: /api/prompts/{id}/versions/{version_id}/inference-runs
method: POST
auth: true
diff --git a/reference.md b/reference.md
index 0fe5b5fa1..40d4b16f8 100644
--- a/reference.md
+++ b/reference.md
@@ -13941,7 +13941,7 @@ client.prompts.versions.create(
-client.prompts.versions.run(...)
+client.prompts.versions.create_run(...)
-
@@ -13974,7 +13974,7 @@ from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
-client.prompts.versions.run(
+client.prompts.versions.create_run(
id=1,
version_id=1,
request=PromptRun(
diff --git a/src/label_studio_sdk/prompts/versions/client.py b/src/label_studio_sdk/prompts/versions/client.py
index 20a88935b..93be4d919 100644
--- a/src/label_studio_sdk/prompts/versions/client.py
+++ b/src/label_studio_sdk/prompts/versions/client.py
@@ -73,7 +73,7 @@ def create(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def run(
+ def create_run(
self, id: int, version_id: int, *, request: PromptRun, request_options: typing.Optional[RequestOptions] = None
) -> PromptRun:
"""
@@ -105,7 +105,7 @@ def run(
client = LabelStudio(
api_key="YOUR_API_KEY",
)
- client.prompts.versions.run(
+ client.prompts.versions.create_run(
id=1,
version_id=1,
request=PromptRun(
@@ -188,7 +188,7 @@ async def create(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def run(
+ async def create_run(
self, id: int, version_id: int, *, request: PromptRun, request_options: typing.Optional[RequestOptions] = None
) -> PromptRun:
"""
@@ -220,7 +220,7 @@ async def run(
client = AsyncLabelStudio(
api_key="YOUR_API_KEY",
)
- await client.prompts.versions.run(
+ await client.prompts.versions.create_run(
id=1,
version_id=1,
request=PromptRun(
diff --git a/tests/prompts/test_versions.py b/tests/prompts/test_versions.py
index 0243980bc..a57e29cf0 100644
--- a/tests/prompts/test_versions.py
+++ b/tests/prompts/test_versions.py
@@ -44,7 +44,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No
validate_response(async_response, expected_response, expected_types)
-async def test_run(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+async def test_create_run(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
expected_response: typing.Any = {
"organization": 1,
"project": 1,
@@ -77,10 +77,12 @@ async def test_run(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
"predictions_updated_at": "datetime",
"completed_at": "datetime",
}
- response = client.prompts.versions.run(id=1, version_id=1, request=PromptRun(project=1, project_subset="All"))
+ response = client.prompts.versions.create_run(
+ id=1, version_id=1, request=PromptRun(project=1, project_subset="All")
+ )
validate_response(response, expected_response, expected_types)
- async_response = await async_client.prompts.versions.run(
+ async_response = await async_client.prompts.versions.create_run(
id=1, version_id=1, request=PromptRun(project=1, project_subset="All")
)
validate_response(async_response, expected_response, expected_types)
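
Since this patch is a pure rename with no change to the path, parameters, or response type, migrating a caller is a one-line edit:

```python
from label_studio_sdk import PromptRun
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")
request = PromptRun(project=1, project_subset="All")

# Before this patch:
#   client.prompts.versions.run(id=1, version_id=1, request=request)
# After this patch:
client.prompts.versions.create_run(id=1, version_id=1, request=request)
```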
From f4557243e80c98c6d49b1678613a4a0f4cdf147c Mon Sep 17 00:00:00 2001
From: nik
Date: Thu, 15 Aug 2024 12:39:18 +0100
Subject: [PATCH 07/14] Add tag-agnostic create_regions function
---
.../label_interface/interface.py | 27 +++++++++++++++----
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/src/label_studio_sdk/label_interface/interface.py b/src/label_studio_sdk/label_interface/interface.py
index 3408d6753..857ac9084 100644
--- a/src/label_studio_sdk/label_interface/interface.py
+++ b/src/label_studio_sdk/label_interface/interface.py
@@ -30,9 +30,11 @@
)
from .object_tags import ObjectTag
from .label_tags import LabelTag
-from .objects import AnnotationValue, TaskValue, PredictionValue
+from .objects import AnnotationValue, TaskValue, PredictionValue, Region
from . import create as CE
+logger = logging.getLogger(__name__)
+
dir_path = os.path.dirname(os.path.realpath(__file__))
file_path = os.path.join(dir_path, "..", "_legacy", "schema", "label_config_schema.json")
@@ -250,8 +252,7 @@ def create_instance(cls, *args, **kwargs):
"""
config = cls.create(*args, **kwargs)
return cls(config=config, **kwargs)
-
-
+
def __init__(self, config: str, tags_mapping=None, *args, **kwargs):
"""
Initialize a LabelInterface instance using a config string.
@@ -299,9 +300,25 @@ def __init__(self, config: str, tags_mapping=None, *args, **kwargs):
self._labels = labels
self._tree = tree
-
+ def create_regions(self, data: Dict) -> List[Region]:
+ """
+        Takes a raw data representation and maps its keys to control tag names.
+        Keys that do not match any control tag name are skipped.
+
+        Args:
+            data (Dict): Raw data representation. Example: {"choices_name": "Positive", "labels_name": [{"start": 0, "end": 10, "value": "person"}]}
+ """
+ regions = []
+ for control_tag_name, payload in data.items():
+ if control_tag_name not in self._controls:
+ logger.info(f"Control tag '{control_tag_name}' not found in the config")
+ continue
+
+ control = self._controls[control_tag_name]
+ regions.append(control.label(**payload))
- ##### NEW API
+ return regions
@property
def config(self):
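
A hedged usage sketch for `create_regions`: because each payload is expanded as `control.label(**payload)`, the values must be mappings of keyword arguments, and the exact keys `label()` accepts for a given control tag are assumptions here, not something this patch pins down:

```python
from label_studio_sdk.label_interface.interface import LabelInterface

config = """
<View>
  <Text name="text" value="$text"/>
  <Labels name="ner" toName="text">
    <Label value="person"/>
  </Labels>
</View>
"""

li = LabelInterface(config)

# "ner" matches a control tag and is turned into a region;
# "missing" has no matching control, so it is logged and skipped.
regions = li.create_regions(
    {
        "ner": {"start": 0, "end": 10, "label": ["person"]},  # assumed kwargs
        "missing": {"value": "ignored"},
    }
)
```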
From a08c49e1617dab7bc237fd7820ce439741a2ab2f Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 15 Aug 2024 13:38:27 +0000
Subject: [PATCH 08/14] SDK regeneration
---
.mock/definition/__package__.yml | 18 +-
.mock/definition/prompts/versions.yml | 8 +-
poetry.lock | 10 +-
pyproject.toml | 2 +-
reference.md | 571 ++++++++++++++++--
src/label_studio_sdk/__init__.py | 20 +-
src/label_studio_sdk/files/client.py | 42 +-
src/label_studio_sdk/projects/client.py | 24 +-
.../projects/exports/client.py | 173 ++++--
src/label_studio_sdk/prompts/client.py | 137 ++++-
.../prompts/versions/client.py | 280 +++++++--
src/label_studio_sdk/types/__init__.py | 20 +-
.../types/{prompt_run.py => inference_run.py} | 18 +-
...ization.py => inference_run_created_by.py} | 2 +-
...ed_by.py => inference_run_organization.py} | 2 +-
.../types/inference_run_project_subset.py | 5 +
.../types/inference_run_status.py | 7 +
.../types/prompt_run_project_subset.py | 5 -
.../types/prompt_run_status.py | 5 -
src/label_studio_sdk/webhooks/client.py | 281 +++++++--
tests/projects/test_exports.py | 13 +-
tests/prompts/test_versions.py | 15 +-
tests/test_files.py | 5 +-
tests/test_projects.py | 7 +-
tests/test_prompts.py | 7 +-
tests/test_webhooks.py | 9 +-
26 files changed, 1365 insertions(+), 321 deletions(-)
rename src/label_studio_sdk/types/{prompt_run.py => inference_run.py} (74%)
rename src/label_studio_sdk/types/{prompt_run_organization.py => inference_run_created_by.py} (57%)
rename src/label_studio_sdk/types/{prompt_run_created_by.py => inference_run_organization.py} (51%)
create mode 100644 src/label_studio_sdk/types/inference_run_project_subset.py
create mode 100644 src/label_studio_sdk/types/inference_run_status.py
delete mode 100644 src/label_studio_sdk/types/prompt_run_project_subset.py
delete mode 100644 src/label_studio_sdk/types/prompt_run_status.py
diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml
index 6de890cc2..ad3aca086 100644
--- a/.mock/definition/__package__.yml
+++ b/.mock/definition/__package__.yml
@@ -2343,28 +2343,28 @@ types:
organization: optional
source:
openapi: openapi/openapi.yaml
- PromptRunOrganization:
+ InferenceRunOrganization:
discriminated: false
union:
- integer
- map
source:
openapi: openapi/openapi.yaml
- PromptRunCreatedBy:
+ InferenceRunCreatedBy:
discriminated: false
union:
- integer
- map
source:
openapi: openapi/openapi.yaml
- PromptRunProjectSubset:
+ InferenceRunProjectSubset:
enum:
- All
- HasGT
- Sample
source:
openapi: openapi/openapi.yaml
- PromptRunStatus:
+ InferenceRunStatus:
enum:
- Pending
- InProgress
@@ -2373,14 +2373,14 @@ types:
- Canceled
source:
openapi: openapi/openapi.yaml
- PromptRun:
+ InferenceRun:
properties:
- organization: optional
+ organization: optional
project: integer
model_version: optional
- created_by: optional
- project_subset: PromptRunProjectSubset
- status: optional
+ created_by: optional
+ project_subset: InferenceRunProjectSubset
+ status: optional
job_id: optional
total_predictions: optional
total_correct_predictions: optional
diff --git a/.mock/definition/prompts/versions.yml b/.mock/definition/prompts/versions.yml
index 1da2fb876..55b3a7469 100644
--- a/.mock/definition/prompts/versions.yml
+++ b/.mock/definition/prompts/versions.yml
@@ -46,7 +46,7 @@ service:
method: POST
auth: true
docs: |
- Run a prompt.
+ Run a prompt inference.
path-parameters:
id:
type: integer
@@ -54,12 +54,12 @@ service:
version_id:
type: integer
docs: Prompt Version ID
- display-name: Run prompt
+ display-name: Run prompt inference
request:
- body: root.PromptRun
+ body: root.InferenceRun
response:
docs: ''
- type: root.PromptRun
+ type: root.InferenceRun
examples:
- path-parameters:
id: 1
diff --git a/poetry.lock b/poetry.lock
index 6690174bd..f4e1a8b4e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -676,13 +676,13 @@ files = [
[[package]]
name = "nltk"
-version = "3.8.2"
+version = "3.8.1"
description = "Natural Language Toolkit"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.7"
files = [
- {file = "nltk-3.8.2-py3-none-any.whl", hash = "sha256:bae044ae22ebe0b694a87c0012233373209f27d5c76d3572599c842740a62fe0"},
- {file = "nltk-3.8.2.tar.gz", hash = "sha256:9c051aa981c6745894906d5c3aad27417f3d1c10d91eefca50382fc922966f31"},
+ {file = "nltk-3.8.1-py3-none-any.whl", hash = "sha256:fd5c9109f976fa86bcadba8f91e47f5e9293bd034474752e92a520f81c93dda5"},
+ {file = "nltk-3.8.1.zip", hash = "sha256:1834da3d0682cba4f2cede2f9aad6b0fafb6461ba451db0efb6f9c39798d64d3"},
]
[package.dependencies]
@@ -1597,4 +1597,4 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
-content-hash = "f2c8147564391bfc18ecd7453f7e2cdcbbc343f8e1eab32ce1966f9df74e1978"
+content-hash = "ca60e17c7aa3f1b33475d3fa20713f581799d6ea53bffe4059c232bc46e1c1d7"
diff --git a/pyproject.toml b/pyproject.toml
index cae3faa2a..5865894eb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -38,7 +38,7 @@ httpx = ">=0.21.2"
ijson = ">=3.2.3"
jsonschema = ">=4.23.0"
lxml = ">=4.2.5"
-nltk = "^3.8.2"
+nltk = "^3.8.1"
numpy = "<2.0.0"
pandas = ">=0.24.0"
pydantic = ">= 1.9.2"
diff --git a/reference.md b/reference.md
index 40d4b16f8..8f5915f14 100644
--- a/reference.md
+++ b/reference.md
@@ -2022,15 +2022,13 @@ curl -H 'Authorization: Token abc123' \ -X POST 'https://localhost:8080/api/impo
-
```python
-from label_studio_sdk import FileUpload
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.files.update(
- id=1,
- request=FileUpload(),
+ id_=1,
)
```
@@ -2047,7 +2045,7 @@ client.files.update(
-
-**id:** `int` — A unique integer value identifying this file upload.
+**id_:** `int` — A unique integer value identifying this file upload.
@@ -2055,7 +2053,15 @@ client.files.update(
-
-**request:** `FileUpload`
+**id:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**file:** `typing.Optional[str]`
@@ -3134,7 +3140,6 @@ The project ID can be found in the URL when viewing the project in Label Studio,
-
```python
-from label_studio_sdk import ProjectLabelConfig
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -3142,9 +3147,7 @@ client = LabelStudio(
)
client.projects.validate_config(
id=1,
- request=ProjectLabelConfig(
- label_config="label_config",
- ),
+ label_config="label_config",
)
```
@@ -3169,7 +3172,7 @@ client.projects.validate_config(
-
-**request:** `ProjectLabelConfig`
+**label_config:** `str` — Label config in XML format. See more about it in the documentation.
@@ -4721,15 +4724,13 @@ For more information, see the [Label Studio documentation on exporting annotatio
-
```python
-from label_studio_sdk import ExportCreate
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.projects.exports.create(
- id=1,
- request=ExportCreate(),
+ id_=1,
)
```
@@ -4746,7 +4747,7 @@ client.projects.exports.create(
-
-**id:** `int` — A unique integer value identifying this project.
+**id_:** `int` — A unique integer value identifying this project.
@@ -4754,7 +4755,95 @@ client.projects.exports.create(
-
-**request:** `ExportCreate`
+**title:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**id:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**created_by:** `typing.Optional[UserSimple]`
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — Creation time
+
+
+
+
+
+-
+
+**finished_at:** `typing.Optional[dt.datetime]` — Complete or fail time
+
+
+
+
+
+-
+
+**status:** `typing.Optional[ExportCreateStatus]`
+
+
+
+
+
+-
+
+**md5:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**counters:** `typing.Optional[typing.Dict[str, typing.Any]]`
+
+
+
+
+
+-
+
+**converted_formats:** `typing.Optional[typing.Sequence[ConvertedFormat]]`
+
+
+
+
+
+-
+
+**task_filter_options:** `typing.Optional[TaskFilterOptions]`
+
+
+
+
+
+-
+
+**annotation_filter_options:** `typing.Optional[AnnotationFilterOptions]`
+
+
+
+
+
+-
+
+**serialization_options:** `typing.Optional[SerializationOptions]`
@@ -4971,7 +5060,6 @@ The project ID can be found in the URL when viewing the project in Label Studio,
-
```python
-from label_studio_sdk import ExportConvert
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -4980,9 +5068,7 @@ client = LabelStudio(
client.projects.exports.convert(
id=1,
export_pk="export_pk",
- request=ExportConvert(
- export_type="export_type",
- ),
+ export_type="export_type",
)
```
@@ -5015,7 +5101,7 @@ client.projects.exports.convert(
-
-**request:** `ExportConvert`
+**export_type:** `str` — Export file format.
@@ -13247,16 +13333,13 @@ If you want to create your own custom webhook, refer to [Create custom events fo
-
```python
-from label_studio_sdk import Webhook
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.webhooks.create(
- request=Webhook(
- url="url",
- ),
+ url="url",
)
```
@@ -13273,7 +13356,87 @@ client.webhooks.create(
-
-**request:** `Webhook`
+**url:** `str` — URL of the webhook
+
+
+
+
+
+-
+
+**id:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**organization:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**project:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**send_payload:** `typing.Optional[bool]` — If value is False, send only the action
+
+
+
+
+
+-
+
+**send_for_all_actions:** `typing.Optional[bool]` — If value is False, the webhook is used only for actions from WebhookAction
+
+
+
+
+
+-
+
+**headers:** `typing.Optional[typing.Dict[str, typing.Any]]` — Key-value JSON of headers
+
+
+
+
+
+-
+
+**is_active:** `typing.Optional[bool]` — If value is False, the webhook is disabled
+
+
+
+
+
+-
+
+**actions:** `typing.Optional[typing.Sequence[WebhookActionsItem]]`
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — Creation time
+
+
+
+
+
+-
+
+**updated_at:** `typing.Optional[dt.datetime]` — Last update time
@@ -13534,18 +13697,15 @@ For more information about webhooks, see [Set up webhooks in Label Studio](https
-
```python
-from label_studio_sdk import WebhookSerializerForUpdate
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.webhooks.update(
- id=1,
+ id_=1,
url="url",
- request=WebhookSerializerForUpdate(
- url="url",
- ),
+ webhook_serializer_for_update_url="url",
)
```
@@ -13562,7 +13722,7 @@ client.webhooks.update(
-
-**id:** `int` — A unique integer value identifying this webhook.
+**id_:** `int` — A unique integer value identifying this webhook.
@@ -13578,7 +13738,7 @@ client.webhooks.update(
-
-**request:** `WebhookSerializerForUpdate`
+**webhook_serializer_for_update_url:** `str` — URL of the webhook
@@ -13631,6 +13791,86 @@ client.webhooks.update(
-
+**id:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**organization:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**project:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**webhook_serializer_for_update_send_payload:** `typing.Optional[bool]` — If value is False, send only the action
+
+
+
+
+
+-
+
+**webhook_serializer_for_update_send_for_all_actions:** `typing.Optional[bool]` — If value is False, the webhook is used only for actions from WebhookAction
+
+
+
+
+
+-
+
+**webhook_serializer_for_update_headers:** `typing.Optional[typing.Dict[str, typing.Any]]` — Key-value JSON of headers
+
+
+
+
+
+-
+
+**webhook_serializer_for_update_is_active:** `typing.Optional[bool]` — If value is False, the webhook is disabled
+
+
+
+
+
+-
+
+**webhook_serializer_for_update_actions:** `typing.Optional[typing.Sequence[WebhookSerializerForUpdateActionsItem]]`
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — Creation time
+
+
+
+
+
+-
+
+**updated_at:** `typing.Optional[dt.datetime]` — Last update time
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -13731,18 +13971,15 @@ Create a new prompt.
-
```python
-from label_studio_sdk import Prompt
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.prompts.create(
- request=Prompt(
- title="title",
- input_fields=["input_fields"],
- output_classes=["output_classes"],
- ),
+ title="title",
+ input_fields=["input_fields"],
+ output_classes=["output_classes"],
)
```
@@ -13759,7 +13996,71 @@ client.prompts.create(
-
-**request:** `Prompt`
+**title:** `str` — Title of the prompt
+
+
+
+
+
+-
+
+**input_fields:** `typing.Sequence[str]` — List of input fields
+
+
+
+
+
+-
+
+**output_classes:** `typing.Sequence[str]` — List of output classes
+
+
+
+
+
+-
+
+**description:** `typing.Optional[str]` — Description of the prompt
+
+
+
+
+
+-
+
+**created_by:** `typing.Optional[PromptCreatedBy]` — User ID of the creator of the prompt
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]` — Date and time the prompt was created
+
+
+
+
+
+-
+
+**updated_at:** `typing.Optional[dt.datetime]` — Date and time the prompt was last updated
+
+
+
+
+
+-
+
+**organization:** `typing.Optional[PromptOrganization]` — Organization ID of the prompt
+
+
+
+
+
+-
+
+**associated_projects:** `typing.Optional[typing.Sequence[int]]` — List of associated project IDs
@@ -13883,7 +14184,6 @@ Create a new version of a prompt.
-
```python
-from label_studio_sdk import PromptVersion
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -13891,12 +14191,10 @@ client = LabelStudio(
)
client.prompts.versions.create(
id=1,
- request=PromptVersion(
- title="title",
- prompt="prompt",
- provider="OpenAI",
- provider_model_id="provider_model_id",
- ),
+ title="title",
+ prompt="prompt",
+ provider="OpenAI",
+ provider_model_id="provider_model_id",
)
```
@@ -13921,7 +14219,71 @@ client.prompts.versions.create(
-
-**request:** `PromptVersion`
+**title:** `str`
+
+
+
+
+
+-
+
+**prompt:** `str`
+
+
+
+
+
+-
+
+**provider:** `PromptVersionProvider`
+
+
+
+
+
+-
+
+**provider_model_id:** `str`
+
+
+
+
+
+-
+
+**parent_model:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**created_by:** `typing.Optional[PromptVersionCreatedBy]`
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]`
+
+
+
+
+
+-
+
+**updated_at:** `typing.Optional[dt.datetime]`
+
+
+
+
+
+-
+
+**organization:** `typing.Optional[PromptVersionOrganization]`
@@ -13953,7 +14315,7 @@ client.prompts.versions.create(
-
-Run a prompt.
+Run a prompt inference.
@@ -13968,7 +14330,6 @@ Run a prompt.
-
```python
-from label_studio_sdk import PromptRun
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -13977,10 +14338,8 @@ client = LabelStudio(
client.prompts.versions.create_run(
id=1,
version_id=1,
- request=PromptRun(
- project=1,
- project_subset="All",
- ),
+ project=1,
+ project_subset="All",
)
```
@@ -14013,7 +14372,111 @@ client.prompts.versions.create_run(
-
-**request:** `PromptRun`
+**project:** `int`
+
+
+
+
+
+-
+
+**project_subset:** `InferenceRunProjectSubset`
+
+
+
+
+
+-
+
+**organization:** `typing.Optional[InferenceRunOrganization]`
+
+
+
+
+
+-
+
+**model_version:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**created_by:** `typing.Optional[InferenceRunCreatedBy]`
+
+
+
+
+
+-
+
+**status:** `typing.Optional[InferenceRunStatus]`
+
+
+
+
+
+-
+
+**job_id:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**total_predictions:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**total_correct_predictions:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**total_tasks:** `typing.Optional[int]`
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]`
+
+
+
+
+
+-
+
+**triggered_at:** `typing.Optional[dt.datetime]`
+
+
+
+
+
+-
+
+**predictions_updated_at:** `typing.Optional[dt.datetime]`
+
+
+
+
+
+-
+
+**completed_at:** `typing.Optional[dt.datetime]`
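
The reference changes above all stem from one mechanical shift: wrapper request objects (`Webhook`, `Prompt`, `ExportCreate`, and so on) are flattened into keyword arguments, and collisions with path parameters are resolved by renaming the path parameter to `id_` or prefixing the body field. A sketch of the new webhook update call, with placeholder values:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

client.webhooks.update(
    id_=1,  # path parameter, renamed to avoid the collision
    url="https://example.com/hook",
    webhook_serializer_for_update_url="https://example.com/hook",  # body field
    webhook_serializer_for_update_is_active=True,
)
```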
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py
index 7bc684417..f3c7a0cb7 100644
--- a/src/label_studio_sdk/__init__.py
+++ b/src/label_studio_sdk/__init__.py
@@ -34,6 +34,11 @@
GcsExportStorageStatus,
GcsImportStorage,
GcsImportStorageStatus,
+ InferenceRun,
+ InferenceRunCreatedBy,
+ InferenceRunOrganization,
+ InferenceRunProjectSubset,
+ InferenceRunStatus,
LocalFilesExportStorage,
LocalFilesExportStorageStatus,
LocalFilesImportStorage,
@@ -51,11 +56,6 @@
Prompt,
PromptCreatedBy,
PromptOrganization,
- PromptRun,
- PromptRunCreatedBy,
- PromptRunOrganization,
- PromptRunProjectSubset,
- PromptRunStatus,
PromptVersion,
PromptVersionCreatedBy,
PromptVersionOrganization,
@@ -201,6 +201,11 @@
"GcsImportStorage",
"GcsImportStorageStatus",
"ImportStorageListTypesResponseItem",
+ "InferenceRun",
+ "InferenceRunCreatedBy",
+ "InferenceRunOrganization",
+ "InferenceRunProjectSubset",
+ "InferenceRunStatus",
"InternalServerError",
"LabelStudioEnvironment",
"LocalFilesExportStorage",
@@ -230,11 +235,6 @@
"Prompt",
"PromptCreatedBy",
"PromptOrganization",
- "PromptRun",
- "PromptRunCreatedBy",
- "PromptRunOrganization",
- "PromptRunProjectSubset",
- "PromptRunStatus",
"PromptVersion",
"PromptVersionCreatedBy",
"PromptVersionOrganization",
diff --git a/src/label_studio_sdk/files/client.py b/src/label_studio_sdk/files/client.py
index a047936f5..4a68269a4 100644
--- a/src/label_studio_sdk/files/client.py
+++ b/src/label_studio_sdk/files/client.py
@@ -96,7 +96,12 @@ def delete(self, id: int, *, request_options: typing.Optional[RequestOptions] =
raise ApiError(status_code=_response.status_code, body=_response_json)
def update(
- self, id: int, *, request: FileUpload, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id_: int,
+ *,
+ id: typing.Optional[int] = OMIT,
+ file: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> FileUpload:
"""
Update a specific uploaded file. To get the file upload ID, use [Get files list](list).
@@ -109,10 +114,12 @@ def update(
Parameters
----------
- id : int
+ id_ : int
A unique integer value identifying this file upload.
- request : FileUpload
+ id : typing.Optional[int]
+
+ file : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -124,21 +131,19 @@ def update(
Examples
--------
- from label_studio_sdk import FileUpload
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.files.update(
- id=1,
- request=FileUpload(),
+ id_=1,
)
"""
_response = self._client_wrapper.httpx_client.request(
- f"api/import/file-upload/{jsonable_encoder(id)}",
+ f"api/import/file-upload/{jsonable_encoder(id_)}",
method="PATCH",
- json=request,
+ json={"id": id, "file": file},
request_options=request_options,
omit=OMIT,
)
@@ -367,7 +372,12 @@ async def delete(self, id: int, *, request_options: typing.Optional[RequestOptio
raise ApiError(status_code=_response.status_code, body=_response_json)
async def update(
- self, id: int, *, request: FileUpload, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id_: int,
+ *,
+ id: typing.Optional[int] = OMIT,
+ file: typing.Optional[str] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> FileUpload:
"""
Update a specific uploaded file. To get the file upload ID, use [Get files list](list).
@@ -380,10 +390,12 @@ async def update(
Parameters
----------
- id : int
+ id_ : int
A unique integer value identifying this file upload.
- request : FileUpload
+ id : typing.Optional[int]
+
+ file : typing.Optional[str]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -395,21 +407,19 @@ async def update(
Examples
--------
- from label_studio_sdk import FileUpload
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
api_key="YOUR_API_KEY",
)
await client.files.update(
- id=1,
- request=FileUpload(),
+ id_=1,
)
"""
_response = await self._client_wrapper.httpx_client.request(
- f"api/import/file-upload/{jsonable_encoder(id)}",
+ f"api/import/file-upload/{jsonable_encoder(id_)}",
method="PATCH",
- json=request,
+ json={"id": id, "file": file},
request_options=request_options,
omit=OMIT,
)
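
With the wrapper gone, `files.update` takes the upload's ID as `id_` and the body fields directly; fields left at their `OMIT` default are simply not sent. A minimal sketch with a placeholder file value:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Sends PATCH api/import/file-upload/1 with only the fields that were set.
updated = client.files.update(id_=1, file="upload/sample.txt")
```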
diff --git a/src/label_studio_sdk/projects/client.py b/src/label_studio_sdk/projects/client.py
index e880e8e18..5bb99bc20 100644
--- a/src/label_studio_sdk/projects/client.py
+++ b/src/label_studio_sdk/projects/client.py
@@ -577,7 +577,7 @@ def import_tasks(
raise ApiError(status_code=_response.status_code, body=_response_json)
def validate_config(
- self, id: int, *, request: ProjectLabelConfig, request_options: typing.Optional[RequestOptions] = None
+ self, id: int, *, label_config: str, request_options: typing.Optional[RequestOptions] = None
) -> ProjectLabelConfig:
"""
Determine whether the label configuration for a specific project is valid. For more information about setting up labeling configs, see [Configure labeling interface](https://labelstud.io/guide/setup) and our [Tags reference](https://labelstud.io/tags/).
@@ -589,7 +589,8 @@ def validate_config(
id : int
A unique integer value identifying this project.
- request : ProjectLabelConfig
+ label_config : str
+        Label config in XML format. See more about it in the documentation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -601,7 +602,6 @@ def validate_config(
Examples
--------
- from label_studio_sdk import ProjectLabelConfig
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -609,15 +609,13 @@ def validate_config(
)
client.projects.validate_config(
id=1,
- request=ProjectLabelConfig(
- label_config="label_config",
- ),
+ label_config="label_config",
)
"""
_response = self._client_wrapper.httpx_client.request(
f"api/projects/{jsonable_encoder(id)}/validate/",
method="POST",
- json=request,
+ json={"label_config": label_config},
request_options=request_options,
omit=OMIT,
)
@@ -1185,7 +1183,7 @@ async def import_tasks(
raise ApiError(status_code=_response.status_code, body=_response_json)
async def validate_config(
- self, id: int, *, request: ProjectLabelConfig, request_options: typing.Optional[RequestOptions] = None
+ self, id: int, *, label_config: str, request_options: typing.Optional[RequestOptions] = None
) -> ProjectLabelConfig:
"""
Determine whether the label configuration for a specific project is valid. For more information about setting up labeling configs, see [Configure labeling interface](https://labelstud.io/guide/setup) and our [Tags reference](https://labelstud.io/tags/).
@@ -1197,7 +1195,8 @@ async def validate_config(
id : int
A unique integer value identifying this project.
- request : ProjectLabelConfig
+ label_config : str
+        Label config in XML format. See more about it in the documentation.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -1209,7 +1208,6 @@ async def validate_config(
Examples
--------
- from label_studio_sdk import ProjectLabelConfig
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
@@ -1217,15 +1215,13 @@ async def validate_config(
)
await client.projects.validate_config(
id=1,
- request=ProjectLabelConfig(
- label_config="label_config",
- ),
+ label_config="label_config",
)
"""
_response = await self._client_wrapper.httpx_client.request(
f"api/projects/{jsonable_encoder(id)}/validate/",
method="POST",
- json=request,
+ json={"label_config": label_config},
request_options=request_options,
omit=OMIT,
)
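
`validate_config` follows the same pattern, taking `label_config` directly. A sketch with an illustrative XML config:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

label_config = """
<View>
  <Text name="text" value="$text"/>
  <Choices name="sentiment" toName="text">
    <Choice value="Positive"/>
    <Choice value="Negative"/>
  </Choices>
</View>
"""

# A non-2xx response raises ApiError, per the generated error handling.
result = client.projects.validate_config(id=1, label_config=label_config)
```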
diff --git a/src/label_studio_sdk/projects/exports/client.py b/src/label_studio_sdk/projects/exports/client.py
index 131d0f05d..07a3f9e6f 100644
--- a/src/label_studio_sdk/projects/exports/client.py
+++ b/src/label_studio_sdk/projects/exports/client.py
@@ -1,5 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
import typing
from json.decoder import JSONDecodeError
@@ -8,9 +9,15 @@
from ...core.jsonable_encoder import jsonable_encoder
from ...core.pydantic_utilities import pydantic_v1
from ...core.request_options import RequestOptions
+from ...types.annotation_filter_options import AnnotationFilterOptions
+from ...types.converted_format import ConvertedFormat
from ...types.export import Export
from ...types.export_convert import ExportConvert
from ...types.export_create import ExportCreate
+from ...types.export_create_status import ExportCreateStatus
+from ...types.serialization_options import SerializationOptions
+from ...types.task_filter_options import TaskFilterOptions
+from ...types.user_simple import UserSimple
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -199,7 +206,22 @@ def list(self, id: int, *, request_options: typing.Optional[RequestOptions] = No
raise ApiError(status_code=_response.status_code, body=_response_json)
def create(
- self, id: int, *, request: ExportCreate, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id_: int,
+ *,
+ title: typing.Optional[str] = OMIT,
+ id: typing.Optional[int] = OMIT,
+ created_by: typing.Optional[UserSimple] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ finished_at: typing.Optional[dt.datetime] = OMIT,
+ status: typing.Optional[ExportCreateStatus] = OMIT,
+ md5: typing.Optional[str] = OMIT,
+ counters: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ converted_formats: typing.Optional[typing.Sequence[ConvertedFormat]] = OMIT,
+ task_filter_options: typing.Optional[TaskFilterOptions] = OMIT,
+ annotation_filter_options: typing.Optional[AnnotationFilterOptions] = OMIT,
+ serialization_options: typing.Optional[SerializationOptions] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> ExportCreate:
"""
Create a new export request to start a background task and generate an export file (snapshot) for a specific project by ID. The project ID can be found in the URL when viewing the project in Label Studio, or you can retrieve all project IDs using [List all projects](../list).
@@ -210,10 +232,34 @@ def create(
Parameters
----------
- id : int
+ id_ : int
A unique integer value identifying this project.
- request : ExportCreate
+ title : typing.Optional[str]
+
+ id : typing.Optional[int]
+
+ created_by : typing.Optional[UserSimple]
+
+ created_at : typing.Optional[dt.datetime]
+ Creation time
+
+ finished_at : typing.Optional[dt.datetime]
+ Complete or fail time
+
+ status : typing.Optional[ExportCreateStatus]
+
+ md5 : typing.Optional[str]
+
+ counters : typing.Optional[typing.Dict[str, typing.Any]]
+
+ converted_formats : typing.Optional[typing.Sequence[ConvertedFormat]]
+
+ task_filter_options : typing.Optional[TaskFilterOptions]
+
+ annotation_filter_options : typing.Optional[AnnotationFilterOptions]
+
+ serialization_options : typing.Optional[SerializationOptions]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -225,21 +271,32 @@ def create(
Examples
--------
- from label_studio_sdk import ExportCreate
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.projects.exports.create(
- id=1,
- request=ExportCreate(),
+ id_=1,
)
"""
_response = self._client_wrapper.httpx_client.request(
- f"api/projects/{jsonable_encoder(id)}/exports/",
+ f"api/projects/{jsonable_encoder(id_)}/exports/",
method="POST",
- json=request,
+ json={
+ "title": title,
+ "id": id,
+ "created_by": created_by,
+ "created_at": created_at,
+ "finished_at": finished_at,
+ "status": status,
+ "md5": md5,
+ "counters": counters,
+ "converted_formats": converted_formats,
+ "task_filter_options": task_filter_options,
+ "annotation_filter_options": annotation_filter_options,
+ "serialization_options": serialization_options,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -347,12 +404,7 @@ def delete(self, id: int, export_pk: str, *, request_options: typing.Optional[Re
raise ApiError(status_code=_response.status_code, body=_response_json)
def convert(
- self,
- id: int,
- export_pk: str,
- *,
- request: ExportConvert,
- request_options: typing.Optional[RequestOptions] = None,
+ self, id: int, export_pk: str, *, export_type: str, request_options: typing.Optional[RequestOptions] = None
) -> ExportConvert:
"""
You can use this to convert an export snapshot into the selected format.
@@ -371,7 +423,8 @@ def convert(
export_pk : str
Primary key identifying the export file.
- request : ExportConvert
+ export_type : str
+ Export file format.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -383,7 +436,6 @@ def convert(
Examples
--------
- from label_studio_sdk import ExportConvert
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -392,15 +444,13 @@ def convert(
client.projects.exports.convert(
id=1,
export_pk="export_pk",
- request=ExportConvert(
- export_type="export_type",
- ),
+ export_type="export_type",
)
"""
_response = self._client_wrapper.httpx_client.request(
f"api/projects/{jsonable_encoder(id)}/exports/{jsonable_encoder(export_pk)}/convert",
method="POST",
- json=request,
+ json={"export_type": export_type},
request_options=request_options,
omit=OMIT,
)
@@ -657,7 +707,22 @@ async def list(self, id: int, *, request_options: typing.Optional[RequestOptions
raise ApiError(status_code=_response.status_code, body=_response_json)
async def create(
- self, id: int, *, request: ExportCreate, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id_: int,
+ *,
+ title: typing.Optional[str] = OMIT,
+ id: typing.Optional[int] = OMIT,
+ created_by: typing.Optional[UserSimple] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ finished_at: typing.Optional[dt.datetime] = OMIT,
+ status: typing.Optional[ExportCreateStatus] = OMIT,
+ md5: typing.Optional[str] = OMIT,
+ counters: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ converted_formats: typing.Optional[typing.Sequence[ConvertedFormat]] = OMIT,
+ task_filter_options: typing.Optional[TaskFilterOptions] = OMIT,
+ annotation_filter_options: typing.Optional[AnnotationFilterOptions] = OMIT,
+ serialization_options: typing.Optional[SerializationOptions] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> ExportCreate:
"""
Create a new export request to start a background task and generate an export file (snapshot) for a specific project by ID. The project ID can be found in the URL when viewing the project in Label Studio, or you can retrieve all project IDs using [List all projects](../list).
@@ -668,10 +733,34 @@ async def create(
Parameters
----------
- id : int
+ id_ : int
A unique integer value identifying this project.
- request : ExportCreate
+ title : typing.Optional[str]
+
+ id : typing.Optional[int]
+
+ created_by : typing.Optional[UserSimple]
+
+ created_at : typing.Optional[dt.datetime]
+ Creation time
+
+ finished_at : typing.Optional[dt.datetime]
+ Complete or fail time
+
+ status : typing.Optional[ExportCreateStatus]
+
+ md5 : typing.Optional[str]
+
+ counters : typing.Optional[typing.Dict[str, typing.Any]]
+
+ converted_formats : typing.Optional[typing.Sequence[ConvertedFormat]]
+
+ task_filter_options : typing.Optional[TaskFilterOptions]
+
+ annotation_filter_options : typing.Optional[AnnotationFilterOptions]
+
+ serialization_options : typing.Optional[SerializationOptions]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -683,21 +772,32 @@ async def create(
Examples
--------
- from label_studio_sdk import ExportCreate
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
api_key="YOUR_API_KEY",
)
await client.projects.exports.create(
- id=1,
- request=ExportCreate(),
+ id_=1,
)
"""
_response = await self._client_wrapper.httpx_client.request(
- f"api/projects/{jsonable_encoder(id)}/exports/",
+ f"api/projects/{jsonable_encoder(id_)}/exports/",
method="POST",
- json=request,
+ json={
+ "title": title,
+ "id": id,
+ "created_by": created_by,
+ "created_at": created_at,
+ "finished_at": finished_at,
+ "status": status,
+ "md5": md5,
+ "counters": counters,
+ "converted_formats": converted_formats,
+ "task_filter_options": task_filter_options,
+ "annotation_filter_options": annotation_filter_options,
+ "serialization_options": serialization_options,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -805,12 +905,7 @@ async def delete(self, id: int, export_pk: str, *, request_options: typing.Optio
raise ApiError(status_code=_response.status_code, body=_response_json)
async def convert(
- self,
- id: int,
- export_pk: str,
- *,
- request: ExportConvert,
- request_options: typing.Optional[RequestOptions] = None,
+ self, id: int, export_pk: str, *, export_type: str, request_options: typing.Optional[RequestOptions] = None
) -> ExportConvert:
"""
You can use this to convert an export snapshot into the selected format.
@@ -829,7 +924,8 @@ async def convert(
export_pk : str
Primary key identifying the export file.
- request : ExportConvert
+ export_type : str
+ Export file format.
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -841,7 +937,6 @@ async def convert(
Examples
--------
- from label_studio_sdk import ExportConvert
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
@@ -850,15 +945,13 @@ async def convert(
await client.projects.exports.convert(
id=1,
export_pk="export_pk",
- request=ExportConvert(
- export_type="export_type",
- ),
+ export_type="export_type",
)
"""
_response = await self._client_wrapper.httpx_client.request(
f"api/projects/{jsonable_encoder(id)}/exports/{jsonable_encoder(export_pk)}/convert",
method="POST",
- json=request,
+ json={"export_type": export_type},
request_options=request_options,
omit=OMIT,
)
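
For export snapshots, every field of the old `ExportCreate` body becomes an optional keyword and the project ID becomes `id_`. A sketch that creates a titled snapshot and leaves every other field unset:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Only "title" is serialized into the request body; all other keywords
# stay at OMIT and are dropped from the JSON payload.
export = client.projects.exports.create(id_=1, title="weekly-snapshot")
print(export.status)
```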
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
index db7db44ed..3d8cc39d0 100644
--- a/src/label_studio_sdk/prompts/client.py
+++ b/src/label_studio_sdk/prompts/client.py
@@ -1,5 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
import typing
from json.decoder import JSONDecodeError
@@ -8,6 +9,8 @@
from ..core.pydantic_utilities import pydantic_v1
from ..core.request_options import RequestOptions
from ..types.prompt import Prompt
+from ..types.prompt_created_by import PromptCreatedBy
+from ..types.prompt_organization import PromptOrganization
from .types.prompts_batch_predictions_response import PromptsBatchPredictionsResponse
from .versions.client import AsyncVersionsClient, VersionsClient
@@ -54,13 +57,51 @@ def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> ty
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def create(self, *, request: Prompt, request_options: typing.Optional[RequestOptions] = None) -> Prompt:
+ def create(
+ self,
+ *,
+ title: str,
+ input_fields: typing.Sequence[str],
+ output_classes: typing.Sequence[str],
+ description: typing.Optional[str] = OMIT,
+ created_by: typing.Optional[PromptCreatedBy] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ organization: typing.Optional[PromptOrganization] = OMIT,
+ associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> Prompt:
"""
Create a new prompt.
Parameters
----------
- request : Prompt
+ title : str
+ Title of the prompt
+
+ input_fields : typing.Sequence[str]
+ List of input fields
+
+ output_classes : typing.Sequence[str]
+ List of output classes
+
+ description : typing.Optional[str]
+ Description of the prompt
+
+ created_by : typing.Optional[PromptCreatedBy]
+ User ID of the creator of the prompt
+
+ created_at : typing.Optional[dt.datetime]
+ Date and time the prompt was created
+
+ updated_at : typing.Optional[dt.datetime]
+ Date and time the prompt was last updated
+
+ organization : typing.Optional[PromptOrganization]
+ Organization ID of the prompt
+
+ associated_projects : typing.Optional[typing.Sequence[int]]
+        List of associated project IDs
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -72,22 +113,33 @@ def create(self, *, request: Prompt, request_options: typing.Optional[RequestOpt
Examples
--------
- from label_studio_sdk import Prompt
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.prompts.create(
- request=Prompt(
- title="title",
- input_fields=["input_fields"],
- output_classes=["output_classes"],
- ),
+ title="title",
+ input_fields=["input_fields"],
+ output_classes=["output_classes"],
)
"""
_response = self._client_wrapper.httpx_client.request(
- "api/prompts/", method="POST", json=request, request_options=request_options, omit=OMIT
+ "api/prompts/",
+ method="POST",
+ json={
+ "title": title,
+ "description": description,
+ "created_by": created_by,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ "organization": organization,
+ "input_fields": input_fields,
+ "output_classes": output_classes,
+ "associated_projects": associated_projects,
+ },
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
@@ -185,13 +237,51 @@ async def list(self, *, request_options: typing.Optional[RequestOptions] = None)
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def create(self, *, request: Prompt, request_options: typing.Optional[RequestOptions] = None) -> Prompt:
+ async def create(
+ self,
+ *,
+ title: str,
+ input_fields: typing.Sequence[str],
+ output_classes: typing.Sequence[str],
+ description: typing.Optional[str] = OMIT,
+ created_by: typing.Optional[PromptCreatedBy] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ organization: typing.Optional[PromptOrganization] = OMIT,
+ associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> Prompt:
"""
Create a new prompt.
Parameters
----------
- request : Prompt
+ title : str
+ Title of the prompt
+
+ input_fields : typing.Sequence[str]
+ List of input fields
+
+ output_classes : typing.Sequence[str]
+ List of output classes
+
+ description : typing.Optional[str]
+ Description of the prompt
+
+ created_by : typing.Optional[PromptCreatedBy]
+ User ID of the creator of the prompt
+
+ created_at : typing.Optional[dt.datetime]
+ Date and time the prompt was created
+
+ updated_at : typing.Optional[dt.datetime]
+ Date and time the prompt was last updated
+
+ organization : typing.Optional[PromptOrganization]
+ Organization ID of the prompt
+
+ associated_projects : typing.Optional[typing.Sequence[int]]
+        List of associated project IDs
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -203,22 +293,33 @@ async def create(self, *, request: Prompt, request_options: typing.Optional[Requ
Examples
--------
- from label_studio_sdk import Prompt
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
api_key="YOUR_API_KEY",
)
await client.prompts.create(
- request=Prompt(
- title="title",
- input_fields=["input_fields"],
- output_classes=["output_classes"],
- ),
+ title="title",
+ input_fields=["input_fields"],
+ output_classes=["output_classes"],
)
"""
_response = await self._client_wrapper.httpx_client.request(
- "api/prompts/", method="POST", json=request, request_options=request_options, omit=OMIT
+ "api/prompts/",
+ method="POST",
+ json={
+ "title": title,
+ "description": description,
+ "created_by": created_by,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ "organization": organization,
+ "input_fields": input_fields,
+ "output_classes": output_classes,
+ "associated_projects": associated_projects,
+ },
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
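The hunks above flatten the `request: Prompt` object into keyword arguments on both the sync and async prompt clients. A minimal before/after sketch of the caller-side change, assuming a reachable Label Studio instance and a valid API key (field values are illustrative):

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Before this regeneration, a Prompt request model was required:
# from label_studio_sdk import Prompt
# client.prompts.create(
#     request=Prompt(title="Sentiment", input_fields=["text"], output_classes=["pos", "neg"]),
# )

# After: fields are passed directly. Optional fields default to OMIT,
# so unset keys are dropped from the JSON body rather than sent as null.
prompt = client.prompts.create(
    title="Sentiment",
    input_fields=["text"],
    output_classes=["pos", "neg"],
    description="Classify review sentiment",  # optional
)
```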
diff --git a/src/label_studio_sdk/prompts/versions/client.py b/src/label_studio_sdk/prompts/versions/client.py
index 93be4d919..2fc52b0ec 100644
--- a/src/label_studio_sdk/prompts/versions/client.py
+++ b/src/label_studio_sdk/prompts/versions/client.py
@@ -1,5 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
import typing
from json.decoder import JSONDecodeError
@@ -8,8 +9,15 @@
from ...core.jsonable_encoder import jsonable_encoder
from ...core.pydantic_utilities import pydantic_v1
from ...core.request_options import RequestOptions
-from ...types.prompt_run import PromptRun
+from ...types.inference_run import InferenceRun
+from ...types.inference_run_created_by import InferenceRunCreatedBy
+from ...types.inference_run_organization import InferenceRunOrganization
+from ...types.inference_run_project_subset import InferenceRunProjectSubset
+from ...types.inference_run_status import InferenceRunStatus
from ...types.prompt_version import PromptVersion
+from ...types.prompt_version_created_by import PromptVersionCreatedBy
+from ...types.prompt_version_organization import PromptVersionOrganization
+from ...types.prompt_version_provider import PromptVersionProvider
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -20,7 +28,19 @@ def __init__(self, *, client_wrapper: SyncClientWrapper):
self._client_wrapper = client_wrapper
def create(
- self, id: int, *, request: PromptVersion, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id: int,
+ *,
+ title: str,
+ prompt: str,
+ provider: PromptVersionProvider,
+ provider_model_id: str,
+ parent_model: typing.Optional[int] = OMIT,
+ created_by: typing.Optional[PromptVersionCreatedBy] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ organization: typing.Optional[PromptVersionOrganization] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> PromptVersion:
"""
Create a new version of a prompt.
@@ -30,7 +50,23 @@ def create(
id : int
Prompt ID
- request : PromptVersion
+ title : str
+
+ prompt : str
+
+ provider : PromptVersionProvider
+
+ provider_model_id : str
+
+ parent_model : typing.Optional[int]
+
+ created_by : typing.Optional[PromptVersionCreatedBy]
+
+ created_at : typing.Optional[dt.datetime]
+
+ updated_at : typing.Optional[dt.datetime]
+
+ organization : typing.Optional[PromptVersionOrganization]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -42,7 +78,6 @@ def create(
Examples
--------
- from label_studio_sdk import PromptVersion
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -50,18 +85,26 @@ def create(
)
client.prompts.versions.create(
id=1,
- request=PromptVersion(
- title="title",
- prompt="prompt",
- provider="OpenAI",
- provider_model_id="provider_model_id",
- ),
+ title="title",
+ prompt="prompt",
+ provider="OpenAI",
+ provider_model_id="provider_model_id",
)
"""
_response = self._client_wrapper.httpx_client.request(
f"api/prompts/{jsonable_encoder(id)}/versions",
method="POST",
- json=request,
+ json={
+ "title": title,
+ "parent_model": parent_model,
+ "prompt": prompt,
+ "provider": provider,
+ "provider_model_id": provider_model_id,
+ "created_by": created_by,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ "organization": organization,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -74,10 +117,28 @@ def create(
raise ApiError(status_code=_response.status_code, body=_response_json)
def create_run(
- self, id: int, version_id: int, *, request: PromptRun, request_options: typing.Optional[RequestOptions] = None
- ) -> PromptRun:
+ self,
+ id: int,
+ version_id: int,
+ *,
+ project: int,
+ project_subset: InferenceRunProjectSubset,
+ organization: typing.Optional[InferenceRunOrganization] = OMIT,
+ model_version: typing.Optional[str] = OMIT,
+ created_by: typing.Optional[InferenceRunCreatedBy] = OMIT,
+ status: typing.Optional[InferenceRunStatus] = OMIT,
+ job_id: typing.Optional[str] = OMIT,
+ total_predictions: typing.Optional[int] = OMIT,
+ total_correct_predictions: typing.Optional[int] = OMIT,
+ total_tasks: typing.Optional[int] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ triggered_at: typing.Optional[dt.datetime] = OMIT,
+ predictions_updated_at: typing.Optional[dt.datetime] = OMIT,
+ completed_at: typing.Optional[dt.datetime] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> InferenceRun:
"""
- Run a prompt.
+ Run a prompt inference.
Parameters
----------
@@ -87,19 +148,44 @@ def create_run(
version_id : int
Prompt Version ID
- request : PromptRun
+ project : int
+
+ project_subset : InferenceRunProjectSubset
+
+ organization : typing.Optional[InferenceRunOrganization]
+
+ model_version : typing.Optional[str]
+
+ created_by : typing.Optional[InferenceRunCreatedBy]
+
+ status : typing.Optional[InferenceRunStatus]
+
+ job_id : typing.Optional[str]
+
+ total_predictions : typing.Optional[int]
+
+ total_correct_predictions : typing.Optional[int]
+
+ total_tasks : typing.Optional[int]
+
+ created_at : typing.Optional[dt.datetime]
+
+ triggered_at : typing.Optional[dt.datetime]
+
+ predictions_updated_at : typing.Optional[dt.datetime]
+
+ completed_at : typing.Optional[dt.datetime]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PromptRun
+ InferenceRun
Examples
--------
- from label_studio_sdk import PromptRun
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
@@ -108,22 +194,35 @@ def create_run(
client.prompts.versions.create_run(
id=1,
version_id=1,
- request=PromptRun(
- project=1,
- project_subset="All",
- ),
+ project=1,
+ project_subset="All",
)
"""
_response = self._client_wrapper.httpx_client.request(
f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
method="POST",
- json=request,
+ json={
+ "organization": organization,
+ "project": project,
+ "model_version": model_version,
+ "created_by": created_by,
+ "project_subset": project_subset,
+ "status": status,
+ "job_id": job_id,
+ "total_predictions": total_predictions,
+ "total_correct_predictions": total_correct_predictions,
+ "total_tasks": total_tasks,
+ "created_at": created_at,
+ "triggered_at": triggered_at,
+ "predictions_updated_at": predictions_updated_at,
+ "completed_at": completed_at,
+ },
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return pydantic_v1.parse_obj_as(PromptRun, _response.json()) # type: ignore
+ return pydantic_v1.parse_obj_as(InferenceRun, _response.json()) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
@@ -135,7 +234,19 @@ def __init__(self, *, client_wrapper: AsyncClientWrapper):
self._client_wrapper = client_wrapper
async def create(
- self, id: int, *, request: PromptVersion, request_options: typing.Optional[RequestOptions] = None
+ self,
+ id: int,
+ *,
+ title: str,
+ prompt: str,
+ provider: PromptVersionProvider,
+ provider_model_id: str,
+ parent_model: typing.Optional[int] = OMIT,
+ created_by: typing.Optional[PromptVersionCreatedBy] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ organization: typing.Optional[PromptVersionOrganization] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
) -> PromptVersion:
"""
Create a new version of a prompt.
@@ -145,7 +256,23 @@ async def create(
id : int
Prompt ID
- request : PromptVersion
+ title : str
+
+ prompt : str
+
+ provider : PromptVersionProvider
+
+ provider_model_id : str
+
+ parent_model : typing.Optional[int]
+
+ created_by : typing.Optional[PromptVersionCreatedBy]
+
+ created_at : typing.Optional[dt.datetime]
+
+ updated_at : typing.Optional[dt.datetime]
+
+ organization : typing.Optional[PromptVersionOrganization]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -157,7 +284,6 @@ async def create(
Examples
--------
- from label_studio_sdk import PromptVersion
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
@@ -165,18 +291,26 @@ async def create(
)
await client.prompts.versions.create(
id=1,
- request=PromptVersion(
- title="title",
- prompt="prompt",
- provider="OpenAI",
- provider_model_id="provider_model_id",
- ),
+ title="title",
+ prompt="prompt",
+ provider="OpenAI",
+ provider_model_id="provider_model_id",
)
"""
_response = await self._client_wrapper.httpx_client.request(
f"api/prompts/{jsonable_encoder(id)}/versions",
method="POST",
- json=request,
+ json={
+ "title": title,
+ "parent_model": parent_model,
+ "prompt": prompt,
+ "provider": provider,
+ "provider_model_id": provider_model_id,
+ "created_by": created_by,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ "organization": organization,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -189,10 +323,28 @@ async def create(
raise ApiError(status_code=_response.status_code, body=_response_json)
async def create_run(
- self, id: int, version_id: int, *, request: PromptRun, request_options: typing.Optional[RequestOptions] = None
- ) -> PromptRun:
+ self,
+ id: int,
+ version_id: int,
+ *,
+ project: int,
+ project_subset: InferenceRunProjectSubset,
+ organization: typing.Optional[InferenceRunOrganization] = OMIT,
+ model_version: typing.Optional[str] = OMIT,
+ created_by: typing.Optional[InferenceRunCreatedBy] = OMIT,
+ status: typing.Optional[InferenceRunStatus] = OMIT,
+ job_id: typing.Optional[str] = OMIT,
+ total_predictions: typing.Optional[int] = OMIT,
+ total_correct_predictions: typing.Optional[int] = OMIT,
+ total_tasks: typing.Optional[int] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ triggered_at: typing.Optional[dt.datetime] = OMIT,
+ predictions_updated_at: typing.Optional[dt.datetime] = OMIT,
+ completed_at: typing.Optional[dt.datetime] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> InferenceRun:
"""
- Run a prompt.
+ Run a prompt inference.
Parameters
----------
@@ -202,19 +354,44 @@ async def create_run(
version_id : int
Prompt Version ID
- request : PromptRun
+ project : int
+
+ project_subset : InferenceRunProjectSubset
+
+ organization : typing.Optional[InferenceRunOrganization]
+
+ model_version : typing.Optional[str]
+
+ created_by : typing.Optional[InferenceRunCreatedBy]
+
+ status : typing.Optional[InferenceRunStatus]
+
+ job_id : typing.Optional[str]
+
+ total_predictions : typing.Optional[int]
+
+ total_correct_predictions : typing.Optional[int]
+
+ total_tasks : typing.Optional[int]
+
+ created_at : typing.Optional[dt.datetime]
+
+ triggered_at : typing.Optional[dt.datetime]
+
+ predictions_updated_at : typing.Optional[dt.datetime]
+
+ completed_at : typing.Optional[dt.datetime]
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
Returns
-------
- PromptRun
+ InferenceRun
Examples
--------
- from label_studio_sdk import PromptRun
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
@@ -223,22 +400,35 @@ async def create_run(
await client.prompts.versions.create_run(
id=1,
version_id=1,
- request=PromptRun(
- project=1,
- project_subset="All",
- ),
+ project=1,
+ project_subset="All",
)
"""
_response = await self._client_wrapper.httpx_client.request(
f"api/prompts/{jsonable_encoder(id)}/versions/{jsonable_encoder(version_id)}/inference-runs",
method="POST",
- json=request,
+ json={
+ "organization": organization,
+ "project": project,
+ "model_version": model_version,
+ "created_by": created_by,
+ "project_subset": project_subset,
+ "status": status,
+ "job_id": job_id,
+ "total_predictions": total_predictions,
+ "total_correct_predictions": total_correct_predictions,
+ "total_tasks": total_tasks,
+ "created_at": created_at,
+ "triggered_at": triggered_at,
+ "predictions_updated_at": predictions_updated_at,
+ "completed_at": completed_at,
+ },
request_options=request_options,
omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
- return pydantic_v1.parse_obj_as(PromptRun, _response.json()) # type: ignore
+ return pydantic_v1.parse_obj_as(InferenceRun, _response.json()) # type: ignore
_response_json = _response.json()
except JSONDecodeError:
raise ApiError(status_code=_response.status_code, body=_response.text)
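With the rename from `PromptRun` to `InferenceRun`, `create_run` now returns an `InferenceRun` and, like `create`, takes its request fields as keyword arguments. A sketch combining both calls, assuming prompt `id=1` already exists and the provider/model values are placeholders:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Create a new version of prompt 1 (provider_model_id is a placeholder).
version = client.prompts.versions.create(
    id=1,
    title="v1",
    prompt="Classify the sentiment of the input text.",
    provider="OpenAI",
    provider_model_id="gpt-3.5-turbo",
)

# Start an inference run for that version against project 1.
run = client.prompts.versions.create_run(
    id=1,
    version_id=1,
    project=1,
    project_subset="All",  # "All", "HasGT", or "Sample"
)
print(run.status)  # e.g. "Pending" while the run is queued
```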
diff --git a/src/label_studio_sdk/types/__init__.py b/src/label_studio_sdk/types/__init__.py
index b92018493..984dd768c 100644
--- a/src/label_studio_sdk/types/__init__.py
+++ b/src/label_studio_sdk/types/__init__.py
@@ -33,6 +33,11 @@
from .gcs_export_storage_status import GcsExportStorageStatus
from .gcs_import_storage import GcsImportStorage
from .gcs_import_storage_status import GcsImportStorageStatus
+from .inference_run import InferenceRun
+from .inference_run_created_by import InferenceRunCreatedBy
+from .inference_run_organization import InferenceRunOrganization
+from .inference_run_project_subset import InferenceRunProjectSubset
+from .inference_run_status import InferenceRunStatus
from .local_files_export_storage import LocalFilesExportStorage
from .local_files_export_storage_status import LocalFilesExportStorageStatus
from .local_files_import_storage import LocalFilesImportStorage
@@ -50,11 +55,6 @@
from .prompt import Prompt
from .prompt_created_by import PromptCreatedBy
from .prompt_organization import PromptOrganization
-from .prompt_run import PromptRun
-from .prompt_run_created_by import PromptRunCreatedBy
-from .prompt_run_organization import PromptRunOrganization
-from .prompt_run_project_subset import PromptRunProjectSubset
-from .prompt_run_status import PromptRunStatus
from .prompt_version import PromptVersion
from .prompt_version_created_by import PromptVersionCreatedBy
from .prompt_version_organization import PromptVersionOrganization
@@ -117,6 +117,11 @@
"GcsExportStorageStatus",
"GcsImportStorage",
"GcsImportStorageStatus",
+ "InferenceRun",
+ "InferenceRunCreatedBy",
+ "InferenceRunOrganization",
+ "InferenceRunProjectSubset",
+ "InferenceRunStatus",
"LocalFilesExportStorage",
"LocalFilesExportStorageStatus",
"LocalFilesImportStorage",
@@ -134,11 +139,6 @@
"Prompt",
"PromptCreatedBy",
"PromptOrganization",
- "PromptRun",
- "PromptRunCreatedBy",
- "PromptRunOrganization",
- "PromptRunProjectSubset",
- "PromptRunStatus",
"PromptVersion",
"PromptVersionCreatedBy",
"PromptVersionOrganization",
diff --git a/src/label_studio_sdk/types/prompt_run.py b/src/label_studio_sdk/types/inference_run.py
similarity index 74%
rename from src/label_studio_sdk/types/prompt_run.py
rename to src/label_studio_sdk/types/inference_run.py
index 113b32247..427bcb6dd 100644
--- a/src/label_studio_sdk/types/prompt_run.py
+++ b/src/label_studio_sdk/types/inference_run.py
@@ -5,19 +5,19 @@
from ..core.datetime_utils import serialize_datetime
from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
-from .prompt_run_created_by import PromptRunCreatedBy
-from .prompt_run_organization import PromptRunOrganization
-from .prompt_run_project_subset import PromptRunProjectSubset
-from .prompt_run_status import PromptRunStatus
+from .inference_run_created_by import InferenceRunCreatedBy
+from .inference_run_organization import InferenceRunOrganization
+from .inference_run_project_subset import InferenceRunProjectSubset
+from .inference_run_status import InferenceRunStatus
-class PromptRun(pydantic_v1.BaseModel):
- organization: typing.Optional[PromptRunOrganization] = None
+class InferenceRun(pydantic_v1.BaseModel):
+ organization: typing.Optional[InferenceRunOrganization] = None
project: int
model_version: typing.Optional[str] = None
- created_by: typing.Optional[PromptRunCreatedBy] = None
- project_subset: PromptRunProjectSubset
- status: typing.Optional[PromptRunStatus] = None
+ created_by: typing.Optional[InferenceRunCreatedBy] = None
+ project_subset: InferenceRunProjectSubset
+ status: typing.Optional[InferenceRunStatus] = None
job_id: typing.Optional[str] = None
total_predictions: typing.Optional[int] = None
total_correct_predictions: typing.Optional[int] = None
diff --git a/src/label_studio_sdk/types/prompt_run_organization.py b/src/label_studio_sdk/types/inference_run_created_by.py
similarity index 57%
rename from src/label_studio_sdk/types/prompt_run_organization.py
rename to src/label_studio_sdk/types/inference_run_created_by.py
index 6938a05af..2da9ece87 100644
--- a/src/label_studio_sdk/types/prompt_run_organization.py
+++ b/src/label_studio_sdk/types/inference_run_created_by.py
@@ -2,4 +2,4 @@
import typing
-PromptRunOrganization = typing.Union[int, typing.Dict[str, typing.Any]]
+InferenceRunCreatedBy = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/prompt_run_created_by.py b/src/label_studio_sdk/types/inference_run_organization.py
similarity index 51%
rename from src/label_studio_sdk/types/prompt_run_created_by.py
rename to src/label_studio_sdk/types/inference_run_organization.py
index 59a4ff902..d430254f0 100644
--- a/src/label_studio_sdk/types/prompt_run_created_by.py
+++ b/src/label_studio_sdk/types/inference_run_organization.py
@@ -2,4 +2,4 @@
import typing
-PromptRunCreatedBy = typing.Union[int, typing.Dict[str, typing.Any]]
+InferenceRunOrganization = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/inference_run_project_subset.py b/src/label_studio_sdk/types/inference_run_project_subset.py
new file mode 100644
index 000000000..f0ae0442e
--- /dev/null
+++ b/src/label_studio_sdk/types/inference_run_project_subset.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+InferenceRunProjectSubset = typing.Union[typing.Literal["All", "HasGT", "Sample"], typing.Any]
diff --git a/src/label_studio_sdk/types/inference_run_status.py b/src/label_studio_sdk/types/inference_run_status.py
new file mode 100644
index 000000000..b832b23ad
--- /dev/null
+++ b/src/label_studio_sdk/types/inference_run_status.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+InferenceRunStatus = typing.Union[
+ typing.Literal["Pending", "InProgress", "Completed", "Failed", "Canceled"], typing.Any
+]
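Both new aliases use Fern's forward-compatible enum pattern: unioning the `Literal` values with `typing.Any` means a server value the SDK does not know about still deserializes instead of failing validation. A small sketch:

```python
from label_studio_sdk.types import InferenceRunProjectSubset, InferenceRunStatus

# Known literals type-check as expected.
subset: InferenceRunProjectSubset = "HasGT"
status: InferenceRunStatus = "InProgress"

# Because the union includes typing.Any, a value introduced by a newer
# server (say, a hypothetical "Archived" status) also passes through.
future_status: InferenceRunStatus = "Archived"
```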
diff --git a/src/label_studio_sdk/types/prompt_run_project_subset.py b/src/label_studio_sdk/types/prompt_run_project_subset.py
deleted file mode 100644
index f68617178..000000000
--- a/src/label_studio_sdk/types/prompt_run_project_subset.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-PromptRunProjectSubset = typing.Union[typing.Literal["All", "HasGT", "Sample"], typing.Any]
diff --git a/src/label_studio_sdk/types/prompt_run_status.py b/src/label_studio_sdk/types/prompt_run_status.py
deleted file mode 100644
index 83c72b472..000000000
--- a/src/label_studio_sdk/types/prompt_run_status.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-PromptRunStatus = typing.Union[typing.Literal["Pending", "InProgress", "Completed", "Failed", "Canceled"], typing.Any]
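For downstream code, the rename is a breaking import change; the `PromptRun*` names are deleted rather than aliased. A sketch of the caller-side update, assuming the top-level package re-exports the new types the way it re-exported the old ones:

```python
# Before this regeneration:
# from label_studio_sdk import PromptRun, PromptRunStatus

# After: the same shapes live under the InferenceRun* names
# (assuming the top-level re-export mirrors the old PromptRun exports).
from label_studio_sdk import InferenceRun, InferenceRunStatus
```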
diff --git a/src/label_studio_sdk/webhooks/client.py b/src/label_studio_sdk/webhooks/client.py
index f77322b6d..2fa681c5f 100644
--- a/src/label_studio_sdk/webhooks/client.py
+++ b/src/label_studio_sdk/webhooks/client.py
@@ -1,5 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
+import datetime as dt
import typing
from json.decoder import JSONDecodeError
@@ -9,7 +10,9 @@
from ..core.pydantic_utilities import pydantic_v1
from ..core.request_options import RequestOptions
from ..types.webhook import Webhook
+from ..types.webhook_actions_item import WebhookActionsItem
from ..types.webhook_serializer_for_update import WebhookSerializerForUpdate
+from ..types.webhook_serializer_for_update_actions_item import WebhookSerializerForUpdateActionsItem
from .types.webhooks_update_request_actions_item import WebhooksUpdateRequestActionsItem
# this is used as the default value for optional parameters
@@ -63,7 +66,22 @@ def list(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- def create(self, *, request: Webhook, request_options: typing.Optional[RequestOptions] = None) -> Webhook:
+ def create(
+ self,
+ *,
+ url: str,
+ id: typing.Optional[int] = OMIT,
+ organization: typing.Optional[int] = OMIT,
+ project: typing.Optional[int] = OMIT,
+ send_payload: typing.Optional[bool] = OMIT,
+ send_for_all_actions: typing.Optional[bool] = OMIT,
+ headers: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ is_active: typing.Optional[bool] = OMIT,
+ actions: typing.Optional[typing.Sequence[WebhookActionsItem]] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> Webhook:
"""
Create a webhook.
Label Studio provides several out-of-the-box webhook events, which you can find listed here: [Available Label Studio webhooks](https://labelstud.io/guide/webhooks#Available-Label-Studio-webhooks).
@@ -74,7 +92,34 @@ def create(self, *, request: Webhook, request_options: typing.Optional[RequestOp
Parameters
----------
- request : Webhook
+ url : str
+ URL of webhook
+
+ id : typing.Optional[int]
+
+ organization : typing.Optional[int]
+
+ project : typing.Optional[int]
+
+ send_payload : typing.Optional[bool]
+ If value is False, send only the action
+
+ send_for_all_actions : typing.Optional[bool]
+ If value is False, the webhook is used only for actions from WebhookAction
+
+ headers : typing.Optional[typing.Dict[str, typing.Any]]
+ Key-value JSON of headers
+
+ is_active : typing.Optional[bool]
+ If value is False, the webhook is disabled
+
+ actions : typing.Optional[typing.Sequence[WebhookActionsItem]]
+
+ created_at : typing.Optional[dt.datetime]
+ Creation time
+
+ updated_at : typing.Optional[dt.datetime]
+ Last update time
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -86,20 +131,33 @@ def create(self, *, request: Webhook, request_options: typing.Optional[RequestOp
Examples
--------
- from label_studio_sdk import Webhook
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.webhooks.create(
- request=Webhook(
- url="url",
- ),
+ url="url",
)
"""
_response = self._client_wrapper.httpx_client.request(
- "api/webhooks/", method="POST", json=request, request_options=request_options, omit=OMIT
+ "api/webhooks/",
+ method="POST",
+ json={
+ "id": id,
+ "organization": organization,
+ "project": project,
+ "url": url,
+ "send_payload": send_payload,
+ "send_for_all_actions": send_for_all_actions,
+ "headers": headers,
+ "is_active": is_active,
+ "actions": actions,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ },
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
@@ -236,10 +294,10 @@ def delete(self, id: int, *, request_options: typing.Optional[RequestOptions] =
def update(
self,
- id: int,
+ id_: int,
*,
url: str,
- request: WebhookSerializerForUpdate,
+ webhook_serializer_for_update_url: str,
send_payload: typing.Optional[bool] = None,
send_for_all_actions: typing.Optional[bool] = None,
headers: typing.Optional[str] = None,
@@ -247,6 +305,18 @@ def update(
actions: typing.Optional[
typing.Union[WebhooksUpdateRequestActionsItem, typing.Sequence[WebhooksUpdateRequestActionsItem]]
] = None,
+ id: typing.Optional[int] = OMIT,
+ organization: typing.Optional[int] = OMIT,
+ project: typing.Optional[int] = OMIT,
+ webhook_serializer_for_update_send_payload: typing.Optional[bool] = OMIT,
+ webhook_serializer_for_update_send_for_all_actions: typing.Optional[bool] = OMIT,
+ webhook_serializer_for_update_headers: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ webhook_serializer_for_update_is_active: typing.Optional[bool] = OMIT,
+ webhook_serializer_for_update_actions: typing.Optional[
+ typing.Sequence[WebhookSerializerForUpdateActionsItem]
+ ] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> WebhookSerializerForUpdate:
"""
@@ -256,13 +326,14 @@ def update(
Parameters
----------
- id : int
+ id_ : int
A unique integer value identifying this webhook.
url : str
URL of webhook
- request : WebhookSerializerForUpdate
+ webhook_serializer_for_update_url : str
+ URL of webhook
send_payload : typing.Optional[bool]
If value is False, send only the action
@@ -278,6 +349,32 @@ def update(
actions : typing.Optional[typing.Union[WebhooksUpdateRequestActionsItem, typing.Sequence[WebhooksUpdateRequestActionsItem]]]
+ id : typing.Optional[int]
+
+ organization : typing.Optional[int]
+
+ project : typing.Optional[int]
+
+ webhook_serializer_for_update_send_payload : typing.Optional[bool]
+ If value is False, send only the action
+
+ webhook_serializer_for_update_send_for_all_actions : typing.Optional[bool]
+ If value is False, the webhook is used only for actions from WebhookAction
+
+ webhook_serializer_for_update_headers : typing.Optional[typing.Dict[str, typing.Any]]
+ Key-value JSON of headers
+
+ webhook_serializer_for_update_is_active : typing.Optional[bool]
+ If value is False, the webhook is disabled
+
+ webhook_serializer_for_update_actions : typing.Optional[typing.Sequence[WebhookSerializerForUpdateActionsItem]]
+
+ created_at : typing.Optional[dt.datetime]
+ Creation time
+
+ updated_at : typing.Optional[dt.datetime]
+ Last update time
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -288,22 +385,19 @@ def update(
Examples
--------
- from label_studio_sdk import WebhookSerializerForUpdate
from label_studio_sdk.client import LabelStudio
client = LabelStudio(
api_key="YOUR_API_KEY",
)
client.webhooks.update(
- id=1,
+ id_=1,
url="url",
- request=WebhookSerializerForUpdate(
- url="url",
- ),
+ webhook_serializer_for_update_url="url",
)
"""
_response = self._client_wrapper.httpx_client.request(
- f"api/webhooks/{jsonable_encoder(id)}/",
+ f"api/webhooks/{jsonable_encoder(id_)}/",
method="PATCH",
params={
"url": url,
@@ -313,7 +407,19 @@ def update(
"is_active": is_active,
"actions": actions,
},
- json=request,
+ json={
+ "id": id,
+ "organization": organization,
+ "project": project,
+ "url": url,
+ "send_payload": send_payload,
+ "send_for_all_actions": send_for_all_actions,
+ "headers": headers,
+ "is_active": is_active,
+ "actions": actions,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ },
request_options=request_options,
omit=OMIT,
)
@@ -373,7 +479,22 @@ async def list(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
- async def create(self, *, request: Webhook, request_options: typing.Optional[RequestOptions] = None) -> Webhook:
+ async def create(
+ self,
+ *,
+ url: str,
+ id: typing.Optional[int] = OMIT,
+ organization: typing.Optional[int] = OMIT,
+ project: typing.Optional[int] = OMIT,
+ send_payload: typing.Optional[bool] = OMIT,
+ send_for_all_actions: typing.Optional[bool] = OMIT,
+ headers: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ is_active: typing.Optional[bool] = OMIT,
+ actions: typing.Optional[typing.Sequence[WebhookActionsItem]] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> Webhook:
"""
Create a webhook.
Label Studio provides several out-of-the-box webhook events, which you can find listed here: [Available Label Studio webhooks](https://labelstud.io/guide/webhooks#Available-Label-Studio-webhooks).
@@ -384,7 +505,34 @@ async def create(self, *, request: Webhook, request_options: typing.Optional[Req
Parameters
----------
- request : Webhook
+ url : str
+ URL of webhook
+
+ id : typing.Optional[int]
+
+ organization : typing.Optional[int]
+
+ project : typing.Optional[int]
+
+ send_payload : typing.Optional[bool]
+ If value is False, send only the action
+
+ send_for_all_actions : typing.Optional[bool]
+ If value is False, the webhook is used only for actions from WebhookAction
+
+ headers : typing.Optional[typing.Dict[str, typing.Any]]
+ Key-value JSON of headers
+
+ is_active : typing.Optional[bool]
+ If value is False, the webhook is disabled
+
+ actions : typing.Optional[typing.Sequence[WebhookActionsItem]]
+
+ created_at : typing.Optional[dt.datetime]
+ Creation time
+
+ updated_at : typing.Optional[dt.datetime]
+ Last update time
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -396,20 +544,33 @@ async def create(self, *, request: Webhook, request_options: typing.Optional[Req
Examples
--------
- from label_studio_sdk import Webhook
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
api_key="YOUR_API_KEY",
)
await client.webhooks.create(
- request=Webhook(
- url="url",
- ),
+ url="url",
)
"""
_response = await self._client_wrapper.httpx_client.request(
- "api/webhooks/", method="POST", json=request, request_options=request_options, omit=OMIT
+ "api/webhooks/",
+ method="POST",
+ json={
+ "id": id,
+ "organization": organization,
+ "project": project,
+ "url": url,
+ "send_payload": send_payload,
+ "send_for_all_actions": send_for_all_actions,
+ "headers": headers,
+ "is_active": is_active,
+ "actions": actions,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ },
+ request_options=request_options,
+ omit=OMIT,
)
try:
if 200 <= _response.status_code < 300:
@@ -546,10 +707,10 @@ async def delete(self, id: int, *, request_options: typing.Optional[RequestOptio
async def update(
self,
- id: int,
+ id_: int,
*,
url: str,
- request: WebhookSerializerForUpdate,
+ webhook_serializer_for_update_url: str,
send_payload: typing.Optional[bool] = None,
send_for_all_actions: typing.Optional[bool] = None,
headers: typing.Optional[str] = None,
@@ -557,6 +718,18 @@ async def update(
actions: typing.Optional[
typing.Union[WebhooksUpdateRequestActionsItem, typing.Sequence[WebhooksUpdateRequestActionsItem]]
] = None,
+ id: typing.Optional[int] = OMIT,
+ organization: typing.Optional[int] = OMIT,
+ project: typing.Optional[int] = OMIT,
+ webhook_serializer_for_update_send_payload: typing.Optional[bool] = OMIT,
+ webhook_serializer_for_update_send_for_all_actions: typing.Optional[bool] = OMIT,
+ webhook_serializer_for_update_headers: typing.Optional[typing.Dict[str, typing.Any]] = OMIT,
+ webhook_serializer_for_update_is_active: typing.Optional[bool] = OMIT,
+ webhook_serializer_for_update_actions: typing.Optional[
+ typing.Sequence[WebhookSerializerForUpdateActionsItem]
+ ] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> WebhookSerializerForUpdate:
"""
@@ -566,13 +739,14 @@ async def update(
Parameters
----------
- id : int
+ id_ : int
A unique integer value identifying this webhook.
url : str
URL of webhook
- request : WebhookSerializerForUpdate
+ webhook_serializer_for_update_url : str
+ URL of webhook
send_payload : typing.Optional[bool]
If value is False, send only the action
@@ -588,6 +762,32 @@ async def update(
actions : typing.Optional[typing.Union[WebhooksUpdateRequestActionsItem, typing.Sequence[WebhooksUpdateRequestActionsItem]]]
+ id : typing.Optional[int]
+
+ organization : typing.Optional[int]
+
+ project : typing.Optional[int]
+
+ webhook_serializer_for_update_send_payload : typing.Optional[bool]
+ If value is False, send only the action
+
+ webhook_serializer_for_update_send_for_all_actions : typing.Optional[bool]
+ If value is False, the webhook is used only for actions from WebhookAction
+
+ webhook_serializer_for_update_headers : typing.Optional[typing.Dict[str, typing.Any]]
+ Key-value JSON of headers
+
+ webhook_serializer_for_update_is_active : typing.Optional[bool]
+ If value is False, the webhook is disabled
+
+ webhook_serializer_for_update_actions : typing.Optional[typing.Sequence[WebhookSerializerForUpdateActionsItem]]
+
+ created_at : typing.Optional[dt.datetime]
+ Creation time
+
+ updated_at : typing.Optional[dt.datetime]
+ Last update time
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -598,22 +798,19 @@ async def update(
Examples
--------
- from label_studio_sdk import WebhookSerializerForUpdate
from label_studio_sdk.client import AsyncLabelStudio
client = AsyncLabelStudio(
api_key="YOUR_API_KEY",
)
await client.webhooks.update(
- id=1,
+ id_=1,
url="url",
- request=WebhookSerializerForUpdate(
- url="url",
- ),
+ webhook_serializer_for_update_url="url",
)
"""
_response = await self._client_wrapper.httpx_client.request(
- f"api/webhooks/{jsonable_encoder(id)}/",
+ f"api/webhooks/{jsonable_encoder(id_)}/",
method="PATCH",
params={
"url": url,
@@ -623,7 +820,19 @@ async def update(
"is_active": is_active,
"actions": actions,
},
- json=request,
+ json={
+ "id": id,
+ "organization": organization,
+ "project": project,
+ "url": url,
+ "send_payload": send_payload,
+ "send_for_all_actions": send_for_all_actions,
+ "headers": headers,
+ "is_active": is_active,
+ "actions": actions,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ },
request_options=request_options,
omit=OMIT,
)
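Flattening `WebhookSerializerForUpdate` into `update` collides with the existing path and query parameters, so the regenerator renames the path parameter to `id_` and prefixes the body fields with `webhook_serializer_for_update_`. A sketch of the new call shape (the URL is illustrative):

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

updated = client.webhooks.update(
    id_=1,                                  # path parameter, renamed from id
    url="https://example.com/hook",         # query parameter
    webhook_serializer_for_update_url="https://example.com/hook",  # body field
)
```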
diff --git a/tests/projects/test_exports.py b/tests/projects/test_exports.py
index 673209006..c17042c1e 100644
--- a/tests/projects/test_exports.py
+++ b/tests/projects/test_exports.py
@@ -2,7 +2,6 @@
import typing
-from label_studio_sdk import ExportConvert, ExportCreate
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
from ..utilities import validate_response
@@ -120,10 +119,10 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No
"interpolate_key_frames": None,
},
}
- response = client.projects.exports.create(id=1, request=ExportCreate())
+ response = client.projects.exports.create(id_=1)
validate_response(response, expected_response, expected_types)
- async_response = await async_client.projects.exports.create(id=1, request=ExportCreate())
+ async_response = await async_client.projects.exports.create(id_=1)
validate_response(async_response, expected_response, expected_types)
@@ -173,14 +172,10 @@ async def test_delete(client: LabelStudio, async_client: AsyncLabelStudio) -> No
async def test_convert(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
expected_response: typing.Any = {"export_type": "export_type"}
expected_types: typing.Any = {"export_type": None}
- response = client.projects.exports.convert(
- id=1, export_pk="export_pk", request=ExportConvert(export_type="export_type")
- )
+ response = client.projects.exports.convert(id=1, export_pk="export_pk", export_type="export_type")
validate_response(response, expected_response, expected_types)
- async_response = await async_client.projects.exports.convert(
- id=1, export_pk="export_pk", request=ExportConvert(export_type="export_type")
- )
+ async_response = await async_client.projects.exports.convert(id=1, export_pk="export_pk", export_type="export_type")
validate_response(async_response, expected_response, expected_types)
diff --git a/tests/prompts/test_versions.py b/tests/prompts/test_versions.py
index a57e29cf0..664039168 100644
--- a/tests/prompts/test_versions.py
+++ b/tests/prompts/test_versions.py
@@ -2,7 +2,6 @@
import typing
-from label_studio_sdk import PromptRun, PromptVersion
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
from ..utilities import validate_response
@@ -32,14 +31,12 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No
"organization": "integer",
}
response = client.prompts.versions.create(
- id=1,
- request=PromptVersion(title="title", prompt="prompt", provider="OpenAI", provider_model_id="provider_model_id"),
+ id=1, title="title", prompt="prompt", provider="OpenAI", provider_model_id="provider_model_id"
)
validate_response(response, expected_response, expected_types)
async_response = await async_client.prompts.versions.create(
- id=1,
- request=PromptVersion(title="title", prompt="prompt", provider="OpenAI", provider_model_id="provider_model_id"),
+ id=1, title="title", prompt="prompt", provider="OpenAI", provider_model_id="provider_model_id"
)
validate_response(async_response, expected_response, expected_types)
@@ -77,12 +74,8 @@ async def test_create_run(client: LabelStudio, async_client: AsyncLabelStudio) -
"predictions_updated_at": "datetime",
"completed_at": "datetime",
}
- response = client.prompts.versions.create_run(
- id=1, version_id=1, request=PromptRun(project=1, project_subset="All")
- )
+ response = client.prompts.versions.create_run(id=1, version_id=1, project=1, project_subset="All")
validate_response(response, expected_response, expected_types)
- async_response = await async_client.prompts.versions.create_run(
- id=1, version_id=1, request=PromptRun(project=1, project_subset="All")
- )
+ async_response = await async_client.prompts.versions.create_run(id=1, version_id=1, project=1, project_subset="All")
validate_response(async_response, expected_response, expected_types)
diff --git a/tests/test_files.py b/tests/test_files.py
index d1232b956..4dd052667 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -2,7 +2,6 @@
import typing
-from label_studio_sdk import FileUpload
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
from .utilities import validate_response
@@ -28,10 +27,10 @@ async def test_delete(client: LabelStudio, async_client: AsyncLabelStudio) -> No
async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
expected_response: typing.Any = {"id": 1, "file": "file"}
expected_types: typing.Any = {"id": "integer", "file": None}
- response = client.files.update(id=1, request=FileUpload())
+ response = client.files.update(id_=1)
validate_response(response, expected_response, expected_types)
- async_response = await async_client.files.update(id=1, request=FileUpload())
+ async_response = await async_client.files.update(id_=1)
validate_response(async_response, expected_response, expected_types)
diff --git a/tests/test_projects.py b/tests/test_projects.py
index 4de51931a..61376a911 100644
--- a/tests/test_projects.py
+++ b/tests/test_projects.py
@@ -2,7 +2,6 @@
import typing
-from label_studio_sdk import ProjectLabelConfig
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
from .utilities import validate_response
@@ -230,10 +229,8 @@ async def test_import_tasks(client: LabelStudio, async_client: AsyncLabelStudio)
async def test_validate_config(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
expected_response: typing.Any = {"label_config": "label_config"}
expected_types: typing.Any = {"label_config": None}
- response = client.projects.validate_config(id=1, request=ProjectLabelConfig(label_config="label_config"))
+ response = client.projects.validate_config(id=1, label_config="label_config")
validate_response(response, expected_response, expected_types)
- async_response = await async_client.projects.validate_config(
- id=1, request=ProjectLabelConfig(label_config="label_config")
- )
+ async_response = await async_client.projects.validate_config(id=1, label_config="label_config")
validate_response(async_response, expected_response, expected_types)
diff --git a/tests/test_prompts.py b/tests/test_prompts.py
index 45e217723..c9495480b 100644
--- a/tests/test_prompts.py
+++ b/tests/test_prompts.py
@@ -2,7 +2,6 @@
import typing
-from label_studio_sdk import Prompt
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
from .utilities import validate_response
@@ -68,13 +67,11 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No
"output_classes": ("list", {0: None}),
"associated_projects": ("list", {0: "integer"}),
}
- response = client.prompts.create(
- request=Prompt(title="title", input_fields=["input_fields"], output_classes=["output_classes"])
- )
+ response = client.prompts.create(title="title", input_fields=["input_fields"], output_classes=["output_classes"])
validate_response(response, expected_response, expected_types)
async_response = await async_client.prompts.create(
- request=Prompt(title="title", input_fields=["input_fields"], output_classes=["output_classes"])
+ title="title", input_fields=["input_fields"], output_classes=["output_classes"]
)
validate_response(async_response, expected_response, expected_types)
diff --git a/tests/test_webhooks.py b/tests/test_webhooks.py
index 8fa37e33c..39cd3b462 100644
--- a/tests/test_webhooks.py
+++ b/tests/test_webhooks.py
@@ -2,7 +2,6 @@
import typing
-from label_studio_sdk import Webhook, WebhookSerializerForUpdate
from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
from .utilities import validate_response
@@ -76,10 +75,10 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No
"created_at": "datetime",
"updated_at": "datetime",
}
- response = client.webhooks.create(request=Webhook(url="url"))
+ response = client.webhooks.create(url="url")
validate_response(response, expected_response, expected_types)
- async_response = await async_client.webhooks.create(request=Webhook(url="url"))
+ async_response = await async_client.webhooks.create(url="url")
validate_response(async_response, expected_response, expected_types)
@@ -158,8 +157,8 @@ async def test_update(client: LabelStudio, async_client: AsyncLabelStudio) -> No
"created_at": "datetime",
"updated_at": "datetime",
}
- response = client.webhooks.update(id=1, url="url", request=WebhookSerializerForUpdate(url="url"))
+ response = client.webhooks.update(id_=1, url="url", webhook_serializer_for_update_url="url")
validate_response(response, expected_response, expected_types)
- async_response = await async_client.webhooks.update(id=1, url="url", request=WebhookSerializerForUpdate(url="url"))
+ async_response = await async_client.webhooks.update(id_=1, url="url", webhook_serializer_for_update_url="url")
validate_response(async_response, expected_response, expected_types)
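Taken together, the test updates apply one mechanical rule to every endpoint: delete the request-model import and splat its fields into the call as keyword arguments. The same rewrite applies to any downstream caller, for example:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

# Before: from label_studio_sdk import ProjectLabelConfig
# client.projects.validate_config(
#     id=1, request=ProjectLabelConfig(label_config="<View>...</View>")
# )

# After:
client.projects.validate_config(id=1, label_config="<View>...</View>")
```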
From 69257f946d636799e4537a1f79c6cf9e92fa31eb Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 15 Aug 2024 16:04:39 +0000
Subject: [PATCH 09/14] SDK regeneration
---
.mock/definition/__package__.yml | 40 ++++
.mock/definition/model_providers.yml | 36 ++++
reference.md | 135 +++++++++++++
src/label_studio_sdk/__init__.py | 12 ++
src/label_studio_sdk/base_client.py | 3 +
.../model_providers/__init__.py | 2 +
.../model_providers/client.py | 190 ++++++++++++++++++
src/label_studio_sdk/types/__init__.py | 10 +
.../types/model_provider_connection.py | 41 ++++
.../model_provider_connection_created_by.py | 5 +
.../model_provider_connection_organization.py | 5 +
.../model_provider_connection_provider.py | 5 +
.../types/model_provider_connection_scope.py | 5 +
tests/test_model_providers.py | 37 ++++
14 files changed, 526 insertions(+)
create mode 100644 .mock/definition/model_providers.yml
create mode 100644 src/label_studio_sdk/model_providers/__init__.py
create mode 100644 src/label_studio_sdk/model_providers/client.py
create mode 100644 src/label_studio_sdk/types/model_provider_connection.py
create mode 100644 src/label_studio_sdk/types/model_provider_connection_created_by.py
create mode 100644 src/label_studio_sdk/types/model_provider_connection_organization.py
create mode 100644 src/label_studio_sdk/types/model_provider_connection_provider.py
create mode 100644 src/label_studio_sdk/types/model_provider_connection_scope.py
create mode 100644 tests/test_model_providers.py
diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml
index ad3aca086..af3d1aaed 100644
--- a/.mock/definition/__package__.yml
+++ b/.mock/definition/__package__.yml
@@ -2391,6 +2391,46 @@ types:
completed_at: optional
source:
openapi: openapi/openapi.yaml
+ ModelProviderConnectionProvider:
+ enum:
+ - OpenAI
+ - AzureOpenAI
+ source:
+ openapi: openapi/openapi.yaml
+ ModelProviderConnectionScope:
+ enum:
+ - Organization
+ - User
+ - Model
+ source:
+ openapi: openapi/openapi.yaml
+ ModelProviderConnectionOrganization:
+ discriminated: false
+ union:
+ - integer
+ - map
+ source:
+ openapi: openapi/openapi.yaml
+ ModelProviderConnectionCreatedBy:
+ discriminated: false
+ union:
+ - integer
+ - map
+ source:
+ openapi: openapi/openapi.yaml
+ ModelProviderConnection:
+ properties:
+ provider: ModelProviderConnectionProvider
+ api_key: optional
+ deployment_name: optional
+ endpoint: optional
+ scope: optional
+ organization: optional
+ created_by: optional
+ created_at: optional
+ updated_at: optional
+ source:
+ openapi: openapi/openapi.yaml
CommentCreatedBy:
discriminated: false
union:
diff --git a/.mock/definition/model_providers.yml b/.mock/definition/model_providers.yml
new file mode 100644
index 000000000..fbc22f9c1
--- /dev/null
+++ b/.mock/definition/model_providers.yml
@@ -0,0 +1,36 @@
+imports:
+ root: __package__.yml
+service:
+ auth: false
+ base-path: ''
+ endpoints:
+ create:
+ path: /api/model-provider-connections/
+ method: POST
+ auth: true
+ docs: |
+ Create a new model provider connection.
+ display-name: Create model provider connection
+ request:
+ body: root.ModelProviderConnection
+ response:
+ docs: ''
+ type: root.ModelProviderConnection
+ examples:
+ - request:
+ provider: OpenAI
+ response:
+ body:
+ provider: OpenAI
+ api_key: api_key
+ deployment_name: deployment_name
+ endpoint: endpoint
+ scope: Organization
+ organization: 1
+ created_by: 1
+ created_at: '2024-01-15T09:30:00Z'
+ updated_at: '2024-01-15T09:30:00Z'
+ audiences:
+ - public
+ source:
+ openapi: openapi/openapi.yaml
diff --git a/reference.md b/reference.md
index 8f5915f14..2b4192f5b 100644
--- a/reference.md
+++ b/reference.md
@@ -14492,6 +14492,141 @@ client.prompts.versions.create_run(
+
+
+
+
+## ModelProviders
+client.model_providers.create(...)
+
+-
+
+#### 📝 Description
+
+
+-
+
+
+-
+
+Create a new model provider connection.
+
+
+
+
+
+#### 🔌 Usage
+
+
+-
+
+
+-
+
+```python
+from label_studio_sdk.client import LabelStudio
+
+client = LabelStudio(
+ api_key="YOUR_API_KEY",
+)
+client.model_providers.create(
+ provider="OpenAI",
+)
+
+```
+
+
+
+
+
+#### ⚙️ Parameters
+
+
+-
+
+
+-
+
+**provider:** `ModelProviderConnectionProvider`
+
+
+
+
+
+-
+
+**api_key:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**deployment_name:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**endpoint:** `typing.Optional[str]`
+
+
+
+
+
+-
+
+**scope:** `typing.Optional[ModelProviderConnectionScope]`
+
+
+
+
+
+-
+
+**organization:** `typing.Optional[ModelProviderConnectionOrganization]`
+
+
+
+
+
+-
+
+**created_by:** `typing.Optional[ModelProviderConnectionCreatedBy]`
+
+
+
+
+
+-
+
+**created_at:** `typing.Optional[dt.datetime]`
+
+
+
+
+
+-
+
+**updated_at:** `typing.Optional[dt.datetime]`
+
+
+
+
+
+-
+
+**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+
+
+
+
+
diff --git a/src/label_studio_sdk/__init__.py b/src/label_studio_sdk/__init__.py
index f3c7a0cb7..c3cc754cb 100644
--- a/src/label_studio_sdk/__init__.py
+++ b/src/label_studio_sdk/__init__.py
@@ -46,6 +46,11 @@
MlBackend,
MlBackendAuthMethod,
MlBackendState,
+ ModelProviderConnection,
+ ModelProviderConnectionCreatedBy,
+ ModelProviderConnectionOrganization,
+ ModelProviderConnectionProvider,
+ ModelProviderConnectionScope,
Prediction,
Project,
ProjectImport,
@@ -93,6 +98,7 @@
files,
import_storage,
ml,
+ model_providers,
predictions,
projects,
prompts,
@@ -221,6 +227,11 @@
"MlUpdateRequestAuthMethod",
"MlUpdateResponse",
"MlUpdateResponseAuthMethod",
+ "ModelProviderConnection",
+ "ModelProviderConnectionCreatedBy",
+ "ModelProviderConnectionOrganization",
+ "ModelProviderConnectionProvider",
+ "ModelProviderConnectionScope",
"Prediction",
"Project",
"ProjectImport",
@@ -292,6 +303,7 @@
"files",
"import_storage",
"ml",
+ "model_providers",
"predictions",
"projects",
"prompts",
diff --git a/src/label_studio_sdk/base_client.py b/src/label_studio_sdk/base_client.py
index 383a63efc..175150891 100644
--- a/src/label_studio_sdk/base_client.py
+++ b/src/label_studio_sdk/base_client.py
@@ -15,6 +15,7 @@
from .files.client import AsyncFilesClient, FilesClient
from .import_storage.client import AsyncImportStorageClient, ImportStorageClient
from .ml.client import AsyncMlClient, MlClient
+from .model_providers.client import AsyncModelProvidersClient, ModelProvidersClient
from .predictions.client import AsyncPredictionsClient, PredictionsClient
from .projects.client import AsyncProjectsClient, ProjectsClient
from .prompts.client import AsyncPromptsClient, PromptsClient
@@ -100,6 +101,7 @@ def __init__(
self.export_storage = ExportStorageClient(client_wrapper=self._client_wrapper)
self.webhooks = WebhooksClient(client_wrapper=self._client_wrapper)
self.prompts = PromptsClient(client_wrapper=self._client_wrapper)
+ self.model_providers = ModelProvidersClient(client_wrapper=self._client_wrapper)
self.comments = CommentsClient(client_wrapper=self._client_wrapper)
self.workspaces = WorkspacesClient(client_wrapper=self._client_wrapper)
@@ -179,6 +181,7 @@ def __init__(
self.export_storage = AsyncExportStorageClient(client_wrapper=self._client_wrapper)
self.webhooks = AsyncWebhooksClient(client_wrapper=self._client_wrapper)
self.prompts = AsyncPromptsClient(client_wrapper=self._client_wrapper)
+ self.model_providers = AsyncModelProvidersClient(client_wrapper=self._client_wrapper)
self.comments = AsyncCommentsClient(client_wrapper=self._client_wrapper)
self.workspaces = AsyncWorkspacesClient(client_wrapper=self._client_wrapper)
diff --git a/src/label_studio_sdk/model_providers/__init__.py b/src/label_studio_sdk/model_providers/__init__.py
new file mode 100644
index 000000000..f3ea2659b
--- /dev/null
+++ b/src/label_studio_sdk/model_providers/__init__.py
@@ -0,0 +1,2 @@
+# This file was auto-generated by Fern from our API Definition.
+
diff --git a/src/label_studio_sdk/model_providers/client.py b/src/label_studio_sdk/model_providers/client.py
new file mode 100644
index 000000000..0cd59e8a5
--- /dev/null
+++ b/src/label_studio_sdk/model_providers/client.py
@@ -0,0 +1,190 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+from json.decoder import JSONDecodeError
+
+from ..core.api_error import ApiError
+from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
+from ..core.pydantic_utilities import pydantic_v1
+from ..core.request_options import RequestOptions
+from ..types.model_provider_connection import ModelProviderConnection
+from ..types.model_provider_connection_created_by import ModelProviderConnectionCreatedBy
+from ..types.model_provider_connection_organization import ModelProviderConnectionOrganization
+from ..types.model_provider_connection_provider import ModelProviderConnectionProvider
+from ..types.model_provider_connection_scope import ModelProviderConnectionScope
+
+# this is used as the default value for optional parameters
+OMIT = typing.cast(typing.Any, ...)
+
+
+class ModelProvidersClient:
+ def __init__(self, *, client_wrapper: SyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ def create(
+ self,
+ *,
+ provider: ModelProviderConnectionProvider,
+ api_key: typing.Optional[str] = OMIT,
+ deployment_name: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[str] = OMIT,
+ scope: typing.Optional[ModelProviderConnectionScope] = OMIT,
+ organization: typing.Optional[ModelProviderConnectionOrganization] = OMIT,
+ created_by: typing.Optional[ModelProviderConnectionCreatedBy] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ModelProviderConnection:
+ """
+ Create a new model provider connection.
+
+ Parameters
+ ----------
+ provider : ModelProviderConnectionProvider
+
+ api_key : typing.Optional[str]
+
+ deployment_name : typing.Optional[str]
+
+ endpoint : typing.Optional[str]
+
+ scope : typing.Optional[ModelProviderConnectionScope]
+
+ organization : typing.Optional[ModelProviderConnectionOrganization]
+
+ created_by : typing.Optional[ModelProviderConnectionCreatedBy]
+
+ created_at : typing.Optional[dt.datetime]
+
+ updated_at : typing.Optional[dt.datetime]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ModelProviderConnection
+
+
+ Examples
+ --------
+ from label_studio_sdk.client import LabelStudio
+
+ client = LabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ client.model_providers.create(
+ provider="OpenAI",
+ )
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "api/model-provider-connections/",
+ method="POST",
+ json={
+ "provider": provider,
+ "api_key": api_key,
+ "deployment_name": deployment_name,
+ "endpoint": endpoint,
+ "scope": scope,
+ "organization": organization,
+ "created_by": created_by,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ModelProviderConnection, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+
+class AsyncModelProvidersClient:
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
+ self._client_wrapper = client_wrapper
+
+ async def create(
+ self,
+ *,
+ provider: ModelProviderConnectionProvider,
+ api_key: typing.Optional[str] = OMIT,
+ deployment_name: typing.Optional[str] = OMIT,
+ endpoint: typing.Optional[str] = OMIT,
+ scope: typing.Optional[ModelProviderConnectionScope] = OMIT,
+ organization: typing.Optional[ModelProviderConnectionOrganization] = OMIT,
+ created_by: typing.Optional[ModelProviderConnectionCreatedBy] = OMIT,
+ created_at: typing.Optional[dt.datetime] = OMIT,
+ updated_at: typing.Optional[dt.datetime] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None
+ ) -> ModelProviderConnection:
+ """
+ Create a new model provider connection.
+
+ Parameters
+ ----------
+ provider : ModelProviderConnectionProvider
+
+ api_key : typing.Optional[str]
+
+ deployment_name : typing.Optional[str]
+
+ endpoint : typing.Optional[str]
+
+ scope : typing.Optional[ModelProviderConnectionScope]
+
+ organization : typing.Optional[ModelProviderConnectionOrganization]
+
+ created_by : typing.Optional[ModelProviderConnectionCreatedBy]
+
+ created_at : typing.Optional[dt.datetime]
+
+ updated_at : typing.Optional[dt.datetime]
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ ModelProviderConnection
+
+
+ Examples
+ --------
+ from label_studio_sdk.client import AsyncLabelStudio
+
+ client = AsyncLabelStudio(
+ api_key="YOUR_API_KEY",
+ )
+ await client.model_providers.create(
+ provider="OpenAI",
+ )
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "api/model-provider-connections/",
+ method="POST",
+ json={
+ "provider": provider,
+ "api_key": api_key,
+ "deployment_name": deployment_name,
+ "endpoint": endpoint,
+ "scope": scope,
+ "organization": organization,
+ "created_by": created_by,
+ "created_at": created_at,
+ "updated_at": updated_at,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return pydantic_v1.parse_obj_as(ModelProviderConnection, _response.json()) # type: ignore
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
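A sketch of an Azure-flavored call against the new client; every value below is a placeholder, and `scope` accepts the enum values defined in the package YAML above:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")

connection = client.model_providers.create(
    provider="AzureOpenAI",                # or "OpenAI"
    api_key="AZURE_OPENAI_KEY",            # placeholder
    deployment_name="gpt-4o-deployment",   # placeholder
    endpoint="https://my-resource.openai.azure.com/",  # placeholder
    scope="Organization",                  # "Organization", "User", or "Model"
)
print(connection.provider, connection.created_at)
```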
diff --git a/src/label_studio_sdk/types/__init__.py b/src/label_studio_sdk/types/__init__.py
index 984dd768c..407b9e7cb 100644
--- a/src/label_studio_sdk/types/__init__.py
+++ b/src/label_studio_sdk/types/__init__.py
@@ -45,6 +45,11 @@
from .ml_backend import MlBackend
from .ml_backend_auth_method import MlBackendAuthMethod
from .ml_backend_state import MlBackendState
+from .model_provider_connection import ModelProviderConnection
+from .model_provider_connection_created_by import ModelProviderConnectionCreatedBy
+from .model_provider_connection_organization import ModelProviderConnectionOrganization
+from .model_provider_connection_provider import ModelProviderConnectionProvider
+from .model_provider_connection_scope import ModelProviderConnectionScope
from .prediction import Prediction
from .project import Project
from .project_import import ProjectImport
@@ -129,6 +134,11 @@
"MlBackend",
"MlBackendAuthMethod",
"MlBackendState",
+ "ModelProviderConnection",
+ "ModelProviderConnectionCreatedBy",
+ "ModelProviderConnectionOrganization",
+ "ModelProviderConnectionProvider",
+ "ModelProviderConnectionScope",
"Prediction",
"Project",
"ProjectImport",
diff --git a/src/label_studio_sdk/types/model_provider_connection.py b/src/label_studio_sdk/types/model_provider_connection.py
new file mode 100644
index 000000000..6e9f784d9
--- /dev/null
+++ b/src/label_studio_sdk/types/model_provider_connection.py
@@ -0,0 +1,41 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import datetime as dt
+import typing
+
+from ..core.datetime_utils import serialize_datetime
+from ..core.pydantic_utilities import deep_union_pydantic_dicts, pydantic_v1
+from .model_provider_connection_created_by import ModelProviderConnectionCreatedBy
+from .model_provider_connection_organization import ModelProviderConnectionOrganization
+from .model_provider_connection_provider import ModelProviderConnectionProvider
+from .model_provider_connection_scope import ModelProviderConnectionScope
+
+
+class ModelProviderConnection(pydantic_v1.BaseModel):
+ provider: ModelProviderConnectionProvider
+ api_key: typing.Optional[str] = None
+ deployment_name: typing.Optional[str] = None
+ endpoint: typing.Optional[str] = None
+ scope: typing.Optional[ModelProviderConnectionScope] = None
+ organization: typing.Optional[ModelProviderConnectionOrganization] = None
+ created_by: typing.Optional[ModelProviderConnectionCreatedBy] = None
+ created_at: typing.Optional[dt.datetime] = None
+ updated_at: typing.Optional[dt.datetime] = None
+
+ def json(self, **kwargs: typing.Any) -> str:
+ kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ return super().json(**kwargs_with_defaults)
+
+ def dict(self, **kwargs: typing.Any) -> typing.Dict[str, typing.Any]:
+ kwargs_with_defaults_exclude_unset: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
+ kwargs_with_defaults_exclude_none: typing.Any = {"by_alias": True, "exclude_none": True, **kwargs}
+
+ return deep_union_pydantic_dicts(
+ super().dict(**kwargs_with_defaults_exclude_unset), super().dict(**kwargs_with_defaults_exclude_none)
+ )
+
+ class Config:
+ frozen = True
+ smart_union = True
+ extra = pydantic_v1.Extra.allow
+ json_encoders = {dt.datetime: serialize_datetime}
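A short sketch of how the overrides above behave at runtime; values are illustrative:

```python
import datetime as dt

from label_studio_sdk.types import ModelProviderConnection

conn = ModelProviderConnection(
    provider="OpenAI",
    api_key="PROVIDER_API_KEY",
    created_at=dt.datetime(2024, 1, 15, 9, 30),
)

# json() defaults to by_alias=True / exclude_unset=True, so fields that were
# never set (endpoint, scope, ...) do not appear in the output; datetimes go
# through serialize_datetime.
print(conn.json())

# dict() merges the exclude_unset and exclude_none renderings via
# deep_union_pydantic_dicts, and Config.frozen = True makes instances
# immutable (attribute assignment raises a TypeError).
print(conn.dict())
```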
diff --git a/src/label_studio_sdk/types/model_provider_connection_created_by.py b/src/label_studio_sdk/types/model_provider_connection_created_by.py
new file mode 100644
index 000000000..9ec9d319d
--- /dev/null
+++ b/src/label_studio_sdk/types/model_provider_connection_created_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ModelProviderConnectionCreatedBy = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/model_provider_connection_organization.py b/src/label_studio_sdk/types/model_provider_connection_organization.py
new file mode 100644
index 000000000..0ce796632
--- /dev/null
+++ b/src/label_studio_sdk/types/model_provider_connection_organization.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ModelProviderConnectionOrganization = typing.Union[int, typing.Dict[str, typing.Any]]
diff --git a/src/label_studio_sdk/types/model_provider_connection_provider.py b/src/label_studio_sdk/types/model_provider_connection_provider.py
new file mode 100644
index 000000000..c4f11b7df
--- /dev/null
+++ b/src/label_studio_sdk/types/model_provider_connection_provider.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ModelProviderConnectionProvider = typing.Union[typing.Literal["OpenAI", "AzureOpenAI"], typing.Any]
diff --git a/src/label_studio_sdk/types/model_provider_connection_scope.py b/src/label_studio_sdk/types/model_provider_connection_scope.py
new file mode 100644
index 000000000..e5586694a
--- /dev/null
+++ b/src/label_studio_sdk/types/model_provider_connection_scope.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ModelProviderConnectionScope = typing.Union[typing.Literal["Organization", "User", "Model"], typing.Any]
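The `Union[Literal[...], Any]` aliases above are the generator's forward-compatibility idiom: type checkers validate and autocomplete the known literals, while the trailing `typing.Any` lets values a newer server might return pass through. A sketch:

```python
import typing

Scope = typing.Union[typing.Literal["Organization", "User", "Model"], typing.Any]

known: Scope = "Organization"  # matches a known literal
future: Scope = "Team"         # hypothetical newer value; still accepted via Any
```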
diff --git a/tests/test_model_providers.py b/tests/test_model_providers.py
new file mode 100644
index 000000000..73f0e226d
--- /dev/null
+++ b/tests/test_model_providers.py
@@ -0,0 +1,37 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+from label_studio_sdk.client import AsyncLabelStudio, LabelStudio
+
+from .utilities import validate_response
+
+
+async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> None:
+ expected_response: typing.Any = {
+ "provider": "OpenAI",
+ "api_key": "api_key",
+ "deployment_name": "deployment_name",
+ "endpoint": "endpoint",
+ "scope": "Organization",
+ "organization": 1,
+ "created_by": 1,
+ "created_at": "2024-01-15T09:30:00Z",
+ "updated_at": "2024-01-15T09:30:00Z",
+ }
+ expected_types: typing.Any = {
+ "provider": None,
+ "api_key": None,
+ "deployment_name": None,
+ "endpoint": None,
+ "scope": None,
+ "organization": "integer",
+ "created_by": "integer",
+ "created_at": "datetime",
+ "updated_at": "datetime",
+ }
+ response = client.model_providers.create(provider="OpenAI")
+ validate_response(response, expected_response, expected_types)
+
+ async_response = await async_client.model_providers.create(provider="OpenAI")
+ validate_response(async_response, expected_response, expected_types)
From 748b735302e65feafecdfa0d331f541719228c5b Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 15 Aug 2024 17:06:22 +0000
Subject: [PATCH 10/14] SDK regeneration
---
.mock/definition/__package__.yml | 3 +++
.mock/definition/prompts.yml | 2 ++
reference.md | 8 ++++++++
src/label_studio_sdk/prompts/client.py | 10 ++++++++++
src/label_studio_sdk/types/prompt.py | 5 +++++
tests/test_prompts.py | 4 ++++
6 files changed, 32 insertions(+)
diff --git a/.mock/definition/__package__.yml b/.mock/definition/__package__.yml
index af3d1aaed..9e608b616 100644
--- a/.mock/definition/__package__.yml
+++ b/.mock/definition/__package__.yml
@@ -2305,6 +2305,9 @@ types:
associated_projects:
type: optional<list<integer>>
docs: List of associated project IDs
+ skill_name:
+ type: optional<string>
+ docs: Name of the skill
source:
openapi: openapi/openapi.yaml
PromptVersionProvider:
diff --git a/.mock/definition/prompts.yml b/.mock/definition/prompts.yml
index b7264070d..285b70d3f 100644
--- a/.mock/definition/prompts.yml
+++ b/.mock/definition/prompts.yml
@@ -29,6 +29,7 @@ service:
- output_classes
associated_projects:
- 1
+ skill_name: skill_name
audiences:
- public
create:
@@ -64,6 +65,7 @@ service:
- output_classes
associated_projects:
- 1
+ skill_name: skill_name
audiences:
- public
batch_predictions:
diff --git a/reference.md b/reference.md
index 2b4192f5b..3d2dce7ae 100644
--- a/reference.md
+++ b/reference.md
@@ -14068,6 +14068,14 @@ client.prompts.create(
-
+**skill_name:** `typing.Optional[str]` — Name of the skill
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
index 3d8cc39d0..c9d139697 100644
--- a/src/label_studio_sdk/prompts/client.py
+++ b/src/label_studio_sdk/prompts/client.py
@@ -69,6 +69,7 @@ def create(
updated_at: typing.Optional[dt.datetime] = OMIT,
organization: typing.Optional[PromptOrganization] = OMIT,
associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
+ skill_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None
) -> Prompt:
"""
@@ -103,6 +104,9 @@ def create(
associated_projects : typing.Optional[typing.Sequence[int]]
List of associated project IDs
+ skill_name : typing.Optional[str]
+ Name of the skill
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -137,6 +141,7 @@ def create(
"input_fields": input_fields,
"output_classes": output_classes,
"associated_projects": associated_projects,
+ "skill_name": skill_name,
},
request_options=request_options,
omit=OMIT,
@@ -249,6 +254,7 @@ async def create(
updated_at: typing.Optional[dt.datetime] = OMIT,
organization: typing.Optional[PromptOrganization] = OMIT,
associated_projects: typing.Optional[typing.Sequence[int]] = OMIT,
+ skill_name: typing.Optional[str] = OMIT,
request_options: typing.Optional[RequestOptions] = None
) -> Prompt:
"""
@@ -283,6 +289,9 @@ async def create(
associated_projects : typing.Optional[typing.Sequence[int]]
List of associated project IDs
+ skill_name : typing.Optional[str]
+ Name of the skill
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -317,6 +326,7 @@ async def create(
"input_fields": input_fields,
"output_classes": output_classes,
"associated_projects": associated_projects,
+ "skill_name": skill_name,
},
request_options=request_options,
omit=OMIT,
diff --git a/src/label_studio_sdk/types/prompt.py b/src/label_studio_sdk/types/prompt.py
index 728cf4949..c732a57b4 100644
--- a/src/label_studio_sdk/types/prompt.py
+++ b/src/label_studio_sdk/types/prompt.py
@@ -55,6 +55,11 @@ class Prompt(pydantic_v1.BaseModel):
List of associated project IDs
"""
+ skill_name: typing.Optional[str] = pydantic_v1.Field(default=None)
+ """
+ Name of the skill
+ """
+
def json(self, **kwargs: typing.Any) -> str:
kwargs_with_defaults: typing.Any = {"by_alias": True, "exclude_unset": True, **kwargs}
return super().json(**kwargs_with_defaults)
diff --git a/tests/test_prompts.py b/tests/test_prompts.py
index c9495480b..4517ff868 100644
--- a/tests/test_prompts.py
+++ b/tests/test_prompts.py
@@ -19,6 +19,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non
"input_fields": ["input_fields"],
"output_classes": ["output_classes"],
"associated_projects": [1],
+ "skill_name": "skill_name",
}
]
expected_types: typing.Any = (
@@ -34,6 +35,7 @@ async def test_list_(client: LabelStudio, async_client: AsyncLabelStudio) -> Non
"input_fields": ("list", {0: None}),
"output_classes": ("list", {0: None}),
"associated_projects": ("list", {0: "integer"}),
+ "skill_name": None,
}
},
)
@@ -55,6 +57,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No
"input_fields": ["input_fields"],
"output_classes": ["output_classes"],
"associated_projects": [1],
+ "skill_name": "skill_name",
}
expected_types: typing.Any = {
"title": None,
@@ -66,6 +69,7 @@ async def test_create(client: LabelStudio, async_client: AsyncLabelStudio) -> No
"input_fields": ("list", {0: None}),
"output_classes": ("list", {0: None}),
"associated_projects": ("list", {0: "integer"}),
+ "skill_name": None,
}
response = client.prompts.create(title="title", input_fields=["input_fields"], output_classes=["output_classes"])
validate_response(response, expected_response, expected_types)
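With this regeneration, `skill_name` passes straight through `prompts.create`. A minimal sketch; all values are placeholders:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")
prompt = client.prompts.create(
    title="Sentiment prompt",
    input_fields=["text"],
    output_classes=["Positive", "Negative"],
    skill_name="TextClassification",  # hypothetical skill identifier
)
print(prompt.skill_name)
```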
From cd4223007cec29efe2e217f48ca6e28b44200678 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 15 Aug 2024 18:04:56 +0000
Subject: [PATCH 11/14] SDK regeneration
---
.mock/definition/prompts.yml | 2 +-
src/label_studio_sdk/prompts/client.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.mock/definition/prompts.yml b/.mock/definition/prompts.yml
index 285b70d3f..355d276c6 100644
--- a/.mock/definition/prompts.yml
+++ b/.mock/definition/prompts.yml
@@ -69,7 +69,7 @@ service:
audiences:
- public
batch_predictions:
- path: /api/model-run/batch-predictions/
+ path: /api/model-run/batch-predictions
method: POST
auth: true
docs: |
diff --git a/src/label_studio_sdk/prompts/client.py b/src/label_studio_sdk/prompts/client.py
index c9d139697..310f21423 100644
--- a/src/label_studio_sdk/prompts/client.py
+++ b/src/label_studio_sdk/prompts/client.py
@@ -188,7 +188,7 @@ def batch_predictions(
client.prompts.batch_predictions()
"""
_response = self._client_wrapper.httpx_client.request(
- "api/model-run/batch-predictions/",
+ "api/model-run/batch-predictions",
method="POST",
json={"modelrun_id": modelrun_id, "results": results},
request_options=request_options,
@@ -373,7 +373,7 @@ async def batch_predictions(
await client.prompts.batch_predictions()
"""
_response = await self._client_wrapper.httpx_client.request(
- "api/model-run/batch-predictions/",
+ "api/model-run/batch-predictions",
method="POST",
json={"modelrun_id": modelrun_id, "results": results},
request_options=request_options,
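The change is transparent to callers; only the request path loses its trailing slash. For completeness, a sketch of the call. The shape of `results` is not defined anywhere in this diff, so the payload below is purely an assumption:

```python
from label_studio_sdk.client import LabelStudio

client = LabelStudio(api_key="YOUR_API_KEY")
client.prompts.batch_predictions(
    modelrun_id=1,  # hypothetical model-run ID
    results=[{"task": 1, "output": {"sentiment": "Positive"}}],  # assumed shape
)
```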
From c3515b937ea26c19566dca2710c9515c1b43a344 Mon Sep 17 00:00:00 2001
From: nik
Date: Thu, 15 Aug 2024 19:43:55 +0100
Subject: [PATCH 12/14] Fix create_regions() function
---
src/label_studio_sdk/label_interface/interface.py | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/label_studio_sdk/label_interface/interface.py b/src/label_studio_sdk/label_interface/interface.py
index 857ac9084..c4caf37fb 100644
--- a/src/label_studio_sdk/label_interface/interface.py
+++ b/src/label_studio_sdk/label_interface/interface.py
@@ -300,13 +300,13 @@ def __init__(self, config: str, tags_mapping=None, *args, **kwargs):
self._labels = labels
self._tree = tree
- def create_regions(self, data: Dict) -> List[Region]:
+ def create_regions(self, data: Dict[str, Union[Dict, List[Dict]]]) -> List[Region]:
"""
Takes raw data representation and maps keys to control tag names.
If a name is not found, it is skipped.
Args:
- data (Dict): Raw data representation. Example: {"choices_name": "Positive", "labels_name": [{"start": 0, "end": 10, "value": "person"}]}
+ data (Dict): Raw data representation. Example: {"choices_name": "Positive", "labels_name": [{"start": 0, "end": 10, "label": "person"}]}
raise_if_control_not_found (bool): Raise an exception if the control tag is not found.
"""
regions = []
@@ -316,7 +316,11 @@ def create_regions(self, data: Dict) -> List[Region]:
continue
control = self._controls[control_tag_name]
- regions.append(control.label(**payload))
+ if isinstance(payload, Dict):
+ payload = [payload]
+ for item in payload:
+ # TODO: allow control.label to process custom payload outside of those strictly containing "label"
+ regions.append(control.label(**item))
return regions
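After this fix, a control name can map to either a single region payload or a list of them. A sketch under the assumption that `LabelInterface` is this module's public class; the config and payloads mirror the docstring example:

```python
from label_studio_sdk.label_interface.interface import LabelInterface

config = """
<View>
  <Text name="text" value="$text"/>
  <Labels name="labels_name" toName="text">
    <Label value="person"/>
  </Labels>
</View>
"""
li = LabelInterface(config)

# A single dict is now wrapped into a one-element list internally...
regions = li.create_regions({"labels_name": {"start": 0, "end": 10, "label": "person"}})

# ...and a list of dicts yields one Region per item.
regions = li.create_regions(
    {"labels_name": [
        {"start": 0, "end": 10, "label": "person"},
        {"start": 11, "end": 15, "label": "person"},
    ]}
)
```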
From 67f87365aded8aa34c733f8e1bab377c19df9d1d Mon Sep 17 00:00:00 2001
From: nik
Date: Thu, 15 Aug 2024 20:12:46 +0100
Subject: [PATCH 13/14] Fix handling of string payloads
---
src/label_studio_sdk/label_interface/interface.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/label_studio_sdk/label_interface/interface.py b/src/label_studio_sdk/label_interface/interface.py
index c4caf37fb..e14b7eb88 100644
--- a/src/label_studio_sdk/label_interface/interface.py
+++ b/src/label_studio_sdk/label_interface/interface.py
@@ -316,10 +316,16 @@ def create_regions(self, data: Dict[str, Union[Dict, List[Dict]]]) -> List[Regio
continue
control = self._controls[control_tag_name]
+ # TODO: this is a workaround and should be revisited:
+ # 1. control.label() should accept custom payloads beyond those strictly containing "label"
+ # 2. the payload type should be stricter, with the typing defined elsewhere,
+ #    but that likely requires rewriting how ControlTag.label() works
+ if isinstance(payload, str):
+ payload = {'label': payload}
if isinstance(payload, Dict):
payload = [payload]
for item in payload:
- # TODO: allow control.label to process custom payload outside of those strictly containing "label"
+
regions.append(control.label(**item))
return regions
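With the string branch added, a bare string becomes shorthand for `{"label": <value>}`, which suits choice-style controls. A self-contained sketch; the config is illustrative:

```python
from label_studio_sdk.label_interface.interface import LabelInterface

config = """
<View>
  <Text name="text" value="$text"/>
  <Choices name="choices_name" toName="text">
    <Choice value="Positive"/>
    <Choice value="Negative"/>
  </Choices>
</View>
"""
li = LabelInterface(config)

# "Positive" is normalized to {"label": "Positive"} before control.label() runs.
regions = li.create_regions({"choices_name": "Positive"})
```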
From ada1b4d877735a3f599b02803c549ac14ad4206c Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Tue, 20 Aug 2024 16:18:21 +0000
Subject: [PATCH 14/14] SDK regeneration
---
poetry.lock | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index f4e1a8b4e..efc4be167 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -387,13 +387,13 @@ files = [
[[package]]
name = "importlib-resources"
-version = "6.4.2"
+version = "6.4.3"
description = "Read resources from Python packages"
optional = false
python-versions = ">=3.8"
files = [
- {file = "importlib_resources-6.4.2-py3-none-any.whl", hash = "sha256:8bba8c54a8a3afaa1419910845fa26ebd706dc716dd208d9b158b4b6966f5c5c"},
- {file = "importlib_resources-6.4.2.tar.gz", hash = "sha256:6cbfbefc449cc6e2095dd184691b7a12a04f40bc75dd4c55d31c34f174cdf57a"},
+ {file = "importlib_resources-6.4.3-py3-none-any.whl", hash = "sha256:2d6dfe3b9e055f72495c2085890837fc8c758984e209115c8792bddcb762cd93"},
+ {file = "importlib_resources-6.4.3.tar.gz", hash = "sha256:4a202b9b9d38563b46da59221d77bb73862ab5d79d461307bcb826d725448b98"},
]
[package.dependencies]
@@ -676,13 +676,13 @@ files = [
[[package]]
name = "nltk"
-version = "3.8.1"
+version = "3.9.1"
description = "Natural Language Toolkit"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "nltk-3.8.1-py3-none-any.whl", hash = "sha256:fd5c9109f976fa86bcadba8f91e47f5e9293bd034474752e92a520f81c93dda5"},
- {file = "nltk-3.8.1.zip", hash = "sha256:1834da3d0682cba4f2cede2f9aad6b0fafb6461ba451db0efb6f9c39798d64d3"},
+ {file = "nltk-3.9.1-py3-none-any.whl", hash = "sha256:4fa26829c5b00715afe3061398a8989dc643b92ce7dd93fb4585a70930d168a1"},
+ {file = "nltk-3.9.1.tar.gz", hash = "sha256:87d127bd3de4bd89a4f81265e5fa59cb1b199b27440175370f7417d2bc7ae868"},
]
[package.dependencies]